From cf4080129f734fb54d4bbe0daf51d4b6b1949ee5 Mon Sep 17 00:00:00 2001
From: Andrea Ovalle <74880762+ovalle15@users.noreply.github.com>
Date: Mon, 23 Jan 2023 17:30:08 -0500
Subject: [PATCH 1/5] Standardized format updates.
---
.../prediction_upload/image_predictions.ipynb | 1774 +++++++++--------
1 file changed, 944 insertions(+), 830 deletions(-)
diff --git a/examples/prediction_upload/image_predictions.ipynb b/examples/prediction_upload/image_predictions.ipynb
index d6daa8ce7..2c563c680 100644
--- a/examples/prediction_upload/image_predictions.ipynb
+++ b/examples/prediction_upload/image_predictions.ipynb
@@ -1,1135 +1,1249 @@
{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
"cells": [
{
"cell_type": "markdown",
- "id": "a6a048e8-b5fe-418b-aec4-829b5b6802e5",
"metadata": {
"id": "a6a048e8-b5fe-418b-aec4-829b5b6802e5"
},
"source": [
"
\n",
- " \n",
+ " \n",
" | "
]
},
{
"cell_type": "markdown",
- "id": "51cf1362-1cde-4749-aac7-5fb94473baa7",
"metadata": {
"id": "51cf1362-1cde-4749-aac7-5fb94473baa7"
},
"source": [
+ "\n",
"\n",
- "![]() \n",
" | \n",
"\n",
"\n",
- "![]() \n",
" | "
]
},
{
"cell_type": "markdown",
- "id": "339795d3-e36c-4470-8605-62bfdd5eea29",
- "metadata": {
- "id": "339795d3-e36c-4470-8605-62bfdd5eea29"
- },
"source": [
"# Image Prediction Import\n",
+ "\n",
"* This notebook walks you through the process of uploading model predictions to a Model Run. This notebook provides an example for each supported prediction type for image assets. \n",
"\n",
- "A Model Run is a container for the predictions, annotations and metrics of a specific experiment in your ML model development cycle."
- ]
- },
- {
- "cell_type": "markdown",
- "id": "e76f007b-9465-4acd-9008-20e25e4a4b98",
+ "A Model Run is a container for the predictions, annotations and metrics of a specific experiment in your ML model development cycle.\n",
+ "\n",
+ "**Supported annotations that can be uploaded through the SDK**\n",
+ "\n",
+ "- Bounding box \n",
+ "- Polygon\n",
+ "- Point\n",
+ "- Polyline \n",
+ "- Classification free-text\n",
+ "- Classification - radio\n",
+ "- Classification - checklist\n",
+ "\n",
+ "\n"
+ ],
"metadata": {
- "id": "e76f007b-9465-4acd-9008-20e25e4a4b98"
- },
- "source": [
- "* For information on what types of predictions are supported per data type, refer to this documentation:\n",
- " * https://docs.labelbox.com/docs/upload-model-predictions#step-6-create-the-predictions-payload"
- ]
+ "id": "9znxMjDYGi0Y"
+ }
},
{
"cell_type": "markdown",
- "id": "f53d50fc-8d3c-452b-9aaf-f6170aaa5576",
- "metadata": {
- "id": "f53d50fc-8d3c-452b-9aaf-f6170aaa5576"
- },
"source": [
"* Notes:\n",
" * If you are importing more than 1,000 mask predictions at a time, consider submitting separate jobs, as they can take longer than other prediction types to import.\n",
" * After the execution of this notebook a complete Model Run with predictions will be created in your organization. "
- ]
+ ],
+ "metadata": {
+ "id": "8uOiTLI413Kj"
+ }
},
{
"cell_type": "markdown",
- "id": "5effdaa3-e701-4804-aa33-bbbaed99eb92",
- "metadata": {
- "id": "5effdaa3-e701-4804-aa33-bbbaed99eb92"
- },
"source": [
- "# Installs"
- ]
+ "## Setup"
+ ],
+ "metadata": {
+ "id": "UtJHIuE8HDRI"
+ }
},
{
"cell_type": "code",
- "execution_count": 1,
- "id": "4d63074b-2379-48af-b9d6-2a66190f03c4",
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "4d63074b-2379-48af-b9d6-2a66190f03c4",
- "outputId": "5945adac-cd52-4d1c-9a6d-7429150e2d50"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\u001b[K |████████████████████████████████| 184 kB 5.2 MB/s \n",
- "\u001b[K |████████████████████████████████| 7.8 MB 58.9 MB/s \n",
- "\u001b[?25h Building wheel for pygeotile (setup.py) ... \u001b[?25l\u001b[?25hdone\n"
- ]
- }
- ],
"source": [
"!pip install -q 'labelbox[data]'"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "6f3cdca1-524f-4247-a63b-2d4371b0257d",
+ ],
"metadata": {
- "id": "6f3cdca1-524f-4247-a63b-2d4371b0257d"
+ "id": "cm8xMaLbGb7v"
},
- "source": [
- "# Imports"
- ]
+ "execution_count": 600,
+ "outputs": []
},
{
"cell_type": "code",
- "execution_count": 2,
- "id": "01fca8c9-0680-4a9c-a11e-1b49f31e9121",
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "01fca8c9-0680-4a9c-a11e-1b49f31e9121",
- "outputId": "b6197a3c-c819-48ea-ef11-52a4973b1f32"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "3.33.1\n"
- ]
- }
- ],
"source": [
- "import labelbox\n",
"from labelbox.schema.ontology import OntologyBuilder, Tool, Classification, Option\n",
- "from labelbox.schema.queue_mode import QueueMode\n",
- "from labelbox import Client, LabelingFrontend, LabelImport, MediaType\n",
+ "from labelbox import Client, MALPredictionImport, LabelImport\n",
+ "from labelbox.data.serialization import NDJsonConverter\n",
+ "from labelbox.schema.media_type import MediaType\n",
"from labelbox.data.annotation_types import (\n",
" Label, ImageData, ObjectAnnotation, MaskData,\n",
" Rectangle, Point, Line, Mask, Polygon,\n",
" Radio, Checklist, Text,\n",
" ClassificationAnnotation, ClassificationAnswer\n",
")\n",
- "from labelbox.data.serialization import NDJsonConverter\n",
- "import json\n",
"import uuid\n",
- "import copy\n",
"import numpy as np\n",
- "print(labelbox.__version__)"
- ]
+ "from labelbox.schema.queue_mode import QueueMode"
+ ],
+ "metadata": {
+ "id": "NIq-6M9kHKSs"
+ },
+ "execution_count": 601,
+ "outputs": []
},
{
"cell_type": "markdown",
- "id": "a72d96e8-33ce-434d-b330-393e1c31702a",
- "metadata": {
- "id": "a72d96e8-33ce-434d-b330-393e1c31702a"
- },
"source": [
- "# API Key and Client\n",
- "Provide a valid api key below in order to properly connect to the Labelbox Client."
- ]
+ "## Replace with your API Key \n",
+ "Guides on [Create an API key](https://docs.labelbox.com/docs/create-an-api-key)"
+ ],
+ "metadata": {
+ "id": "pZ2rBqY8HQoe"
+ }
},
{
"cell_type": "code",
- "execution_count": 3,
- "id": "86003724-4807-4281-95c1-5284a6f9609f",
- "metadata": {
- "id": "86003724-4807-4281-95c1-5284a6f9609f"
- },
- "outputs": [],
"source": [
- "# Add your api key as a string\n",
- "API_KEY = \"\"\n",
- "client = Client(api_key=API_KEY)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "960998ac-bde4-4184-8b7d-26d8e019cc7f",
+ "API_KEY = None\n",
+ "client = Client(API_KEY)"
+ ],
"metadata": {
- "id": "960998ac-bde4-4184-8b7d-26d8e019cc7f"
+ "id": "z7ZLKLYLHP__"
},
- "source": [
- "---- \n",
- "### Steps\n",
- "1. Make sure project is setup\n",
- "2. Collect annotations\n",
- "3. Upload"
- ]
+ "execution_count": 602,
+ "outputs": []
},
{
"cell_type": "markdown",
- "id": "82a7381b-2409-4ed3-9d25-881a1e1d8ca6",
- "metadata": {
- "id": "82a7381b-2409-4ed3-9d25-881a1e1d8ca6"
- },
"source": [
- "### Create a Model Run (for predictions) and a Project (for annotations)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "d51fd2e1-bf5f-4c61-bca1-929f43b076ed",
+ "## Supported Predictions"
+ ],
"metadata": {
- "id": "d51fd2e1-bf5f-4c61-bca1-929f43b076ed"
- },
- "source": [
- "We will be creating \n",
- "- a Model and a Model Run to contain model predictions\n",
- "- a project to contain annotations"
- ]
+ "id": "OePiibbed0nG"
+ }
},
{
- "cell_type": "markdown",
- "id": "49i_juOUr6av",
- "metadata": {
- "id": "49i_juOUr6av"
- },
+ "cell_type": "code",
"source": [
- "First, we create an ontology with all the possible tools and classifications supported for images. The official list of supported predictions and annotations that can be uploaded can be found:\n",
- "- [predictions that can be uploaded to a Model Run](https://docs.labelbox.com/docs/upload-model-predictions#step-6-create-the-predictions-payload)\n",
- "- [annotations that can be imported in a project as ground-truths](https://docs.labelbox.com/docs/import-ground-truth)\n",
+ "########### Radio Classification ###########\n",
"\n",
- "Note: the ontology of the Model Run does not need to match the ontology of the project. However, only the features present in the Model Run ontology can be uploaded as predictions and annotations to the Model Run."
- ]
+ "# Python annotation\n",
+ "radio_prediction = ClassificationAnnotation(\n",
+ " name=\"radio_question\", \n",
+ " value=Radio(answer = ClassificationAnswer(name = \"second_radio_answer\", confidence=0.5))\n",
+ ")\n",
+ "\n",
+ "# NDJSON\n",
+ "radio_prediction_ndjson = {\n",
+ " 'name': 'radio_question',\n",
+ " 'answer': {'name': 'second_radio_answer', 'confidence': 0.5}\n",
+ "} "
+ ],
+ "metadata": {
+ "id": "v5wL6oojz9Ge"
+ },
+ "execution_count": 603,
+ "outputs": []
},
{
"cell_type": "code",
- "execution_count": 6,
- "id": "f9f9287c-aad7-4914-bc87-1453fb8bce81",
- "metadata": {
- "id": "f9f9287c-aad7-4914-bc87-1453fb8bce81"
- },
- "outputs": [],
"source": [
- "ontology_builder = OntologyBuilder(\n",
- " tools=[ # List of Tool objects\n",
- " Tool( # Bounding Box tool given the name \"box\"\n",
- " tool=Tool.Type.BBOX, \n",
- " name=\"box\"), \n",
- " Tool( # Polyline tool given the name \"line\"\n",
- " tool=Tool.Type.LINE, \n",
- " name=\"line\"), \n",
- " Tool( # Point tool given the name \"point\"\n",
- " tool=Tool.Type.POINT, \n",
- " name=\"point\"), \n",
- " Tool( # Polygon tool given the name \"polygon\"\n",
- " tool=Tool.Type.POLYGON, \n",
- " name=\"polygon\"), \n",
- " Tool( # Segmentation mask tool given the name \"mask\"\n",
- " tool=Tool.Type.SEGMENTATION, \n",
- " name=\"mask\")], \n",
- " classifications=[ # List of Classification objects\n",
- " Classification( # Text classification given the name \"text\"\n",
- " class_type=Classification.Type.TEXT,\n",
- " instructions=\"text\"), \n",
- " Classification( # Checklist classification given the name \"text\" with two options: \"first_checklist_answer\" and \"second_checklist_answer\"\n",
- " class_type=Classification.Type.CHECKLIST, \n",
- " instructions=\"checklist\", \n",
- " options=[\n",
- " Option(value=\"first_checklist_answer\"),\n",
- " Option(value=\"second_checklist_answer\") \n",
- " ]\n",
- " ), \n",
- " Classification( # Radio classification given the name \"text\" with two options: \"first_radio_answer\" and \"second_radio_answer\"\n",
- " class_type=Classification.Type.RADIO, \n",
- " instructions=\"radio\", \n",
- " options=[\n",
- " Option(value=\"first_radio_answer\"),\n",
- " Option(value=\"second_radio_answer\")\n",
- " ]\n",
+ "########## Nested Classifications ##########\n",
+ "\n",
+ "\n",
+ "### Radio #### \n",
+ "# Python annotation \n",
+ "nested_radio_prediction = ClassificationAnnotation(\n",
+ " name=\"nested_radio_question\", \n",
+ " value=Radio(answer = ClassificationAnswer(name = \"first_radio_answer\" , confidence=0.5)),\n",
+ " classifications=[\n",
+ " \tClassificationAnnotation(\n",
+ " \tname=\"sub_radio_question\",\n",
+ " \t\tvalue=Radio(answer=ClassificationAnswer(name=\"first_sub_radio_answer\", confidence=0.5))\n",
" )\n",
" ]\n",
")\n",
"\n",
- "ontology = client.create_ontology(\"Ontology Image\", ontology_builder.asdict())"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "1GdimALBuzRU",
+ "\n",
+ "# NDJSON \n",
+ "nested_radio_prediction_ndjson = {\n",
+ " \"name\": \"nested_radio_question\",\n",
+ " \"answer\": { \"name\": \"first_radio_answer\", \"confidence\": 0.5 },\n",
+ " \"classifications\" : [\n",
+ " {\n",
+ " \"name\": \"sub_radio_question\", \n",
+ " \"answer\": {\"name\": \"first_sub_radio_answer\", \"confidence\": 0.5 }\n",
+ " }\n",
+ " ]\n",
+ "}\n",
+ "\n",
+ "### Checklist #### \n",
+ "## Only supported with NDJSON tools\n",
+ "\n",
+ "nested_checklist_prediction_ndjson = {\n",
+ " \"name\": \"nested_checklist_question\",\n",
+ " \"answer\": [{\n",
+ " \"name\": \"first_checklist_answer\", \n",
+ " \"confidence\": 0.5,\n",
+ " \"classifications\" : [\n",
+ " {\n",
+ " \"name\": \"sub_checklist_question\", \n",
+ " \"answer\": {\"name\": \"first_sub_checklist_answer\", \"confidence\": 0.5 }\n",
+ " } \n",
+ " ] \n",
+ " }]\n",
+ "}\n",
+ "\n"
+ ],
"metadata": {
- "id": "1GdimALBuzRU"
+ "id": "I75K-wx7_sDs"
},
- "source": [
- "We create a Model and a Model Run, to contain the predictions. "
- ]
+ "execution_count": 604,
+ "outputs": []
},
{
"cell_type": "code",
- "execution_count": 7,
- "id": "hANaXddn5Krs",
- "metadata": {
- "id": "hANaXddn5Krs"
- },
- "outputs": [],
"source": [
- "# create Model\n",
- "model = client.create_model(name=\"image_model_run\", \n",
- " ontology_id=ontology.uid)\n",
- "# create Model Run\n",
- "model_run = model.create_model_run(\"iteration 1\")"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "EC_D3TFX5LBo",
+ "############ Checklist ############\n",
+ "\n",
+ "# Python Annotations\n",
+ "checklist_prediction = ClassificationAnnotation(\n",
+ " name=\"checklist_question\", # must match your ontology feature's name\n",
+ " value=Checklist(\n",
+ " answer = [\n",
+ " ClassificationAnswer(\n",
+ " name = \"first_checklist_answer\", \n",
+ " confidence=0.5\n",
+ " ), \n",
+ " ClassificationAnswer(\n",
+ " name = \"second_checklist_answer\", \n",
+ " confidence=0.5\n",
+ " )\n",
+ " ]\n",
+ " )\n",
+ " )\n",
+ "\n",
+ "# NDJSON\n",
+ "checklist_prediction_ndjson = {\n",
+ " 'name': 'checklist_question',\n",
+ " 'answer': [\n",
+ " {'name': 'first_checklist_answer' , 'confidence': 0.5},\n",
+ " {'name': 'second_checklist_answer', 'confidence': 0.5}\n",
+ " ]\n",
+ "}"
+ ],
"metadata": {
- "id": "EC_D3TFX5LBo"
+ "id": "b2UjSoYez9I1"
},
- "source": [
- "We create a project, to contain the annotations."
- ]
+ "execution_count": 605,
+ "outputs": []
},
{
"cell_type": "code",
- "execution_count": 8,
- "id": "044e9194-d21d-403e-b64c-047c1063b0fe",
- "metadata": {
- "id": "044e9194-d21d-403e-b64c-047c1063b0fe"
- },
- "outputs": [],
"source": [
- "# Create a Labelbox project\n",
- "project = client.create_project(name=\"image_project\", \n",
- " queue_mode=QueueMode.Batch,\n",
- " # Quality Settings setup \n",
- " auto_audit_percentage=1,\n",
- " auto_audit_number_of_labels=1,\n",
- " media_type=MediaType.Image)\n",
- "project.setup_editor(ontology)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "o9JbDSBH5fsF",
+ "####### Bounding box #######\n",
+ "\n",
+ "\n",
+ "# Python Annotation \n",
+ "bbox_prediction = ObjectAnnotation(\n",
+ " name = \"bounding_box\", # must match your ontology feature's name\n",
+ " confidence=0.5, \n",
+ " value=Rectangle(\n",
+ " start=Point(x=977, y=1690), # Top left\n",
+ " end=Point(x=330, y=225), # Bottom right\n",
+ " ),\n",
+ " \n",
+ ")\n",
+ "\n",
+ "#NDJSON \n",
+ "bbox_prediction_ndjson = {\n",
+ " 'name': 'bounding_box', \n",
+ " 'confidence': 0.5,\n",
+ " 'bbox': {\n",
+ " \"top\": 977,\n",
+ " \"left\": 1690,\n",
+ " \"height\": 330,\n",
+ " \"width\": 225\n",
+ " }\n",
+ "}\n"
+ ],
"metadata": {
- "id": "o9JbDSBH5fsF"
+ "id": "xCU4JRP0z9Nh"
},
- "source": [
- "### Create a dataset with a data row\n",
- "We will upload predictions and annotations on this data row. "
- ]
+ "execution_count": 606,
+ "outputs": []
},
{
"cell_type": "code",
- "execution_count": 9,
- "id": "WCFSlblL5gDc",
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "WCFSlblL5gDc",
- "outputId": "571c1b3f-9d20-4b6c-a30f-87c20dd1dba8"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\n"
- ]
- }
- ],
"source": [
- "# # Create one Labelbox dataset\n",
- "dataset = client.create_dataset(name=\"image_prediction_import_demo_dataset\")\n",
- "# Grab an example image and create a Labelbox data row in the dataset\n",
- "uploads = {\n",
- " \"row_data\": \"https://raw.githubusercontent.com/Labelbox/labelbox-python/develop/examples/assets/2560px-Kitano_Street_Kobe01s5s4110.jpg\",\n",
- " # To learn more about Global Keys : https://docs.labelbox.com/docs/global-keys\n",
- " \"global_key\": \"TEST-ID-%id\" % uuid.uuid1()\n",
- " }\n",
- "data_row = dataset.create_data_row(uploads)\n",
- "print(data_row)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "8eRGvN8ynJD6",
- "metadata": {
- "id": "8eRGvN8ynJD6"
- },
- "source": [
- "### Send the data row to the Model Run and to the project"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "U-yBDwZuBn_M",
+ "####### Bounding box with nested classification #######\n",
+ "bbox_with_radio_subclass_prediction = ObjectAnnotation(\n",
+ "    name=\"bbox_with_radio_subclass\", # must match your ontology feature's name\n",
+ "    confidence=0.5,\n",
+ " value=Rectangle(\n",
+ " start=Point(x=933, y=541), # Top left\n",
+ " end=Point(x=191, y=330), # Bottom right\n",
+ " ),\n",
+ " classifications=[\n",
+ " \tClassificationAnnotation(\n",
+ " \tname=\"sub_radio_question\",\n",
+ " \t\tvalue=Radio(answer=ClassificationAnswer(name=\"first_sub_radio_answer\", confidence=0.5))\n",
+ " )\n",
+ " ]\n",
+ ")\n",
+ "\n",
+ "\n",
+ "## NDJSON\n",
+ "bbox_with_radio_subclass_prediction_ndjson = {\n",
+ " \"name\": \"bbox_with_radio_subclass\", \n",
+ " \"confidence\": 0.5,\n",
+ " \"classifications\": [{\n",
+ " \"name\": \"sub_radio_question\",\n",
+ " \"confidence\": 0.5,\n",
+ " \"answer\": \n",
+ " { \"name\":\"first_sub_radio_answer\", \"confidence\": 0.5}\n",
+ " \n",
+ " }],\n",
+ " \"bbox\": {\n",
+ " \"top\": 933,\n",
+ " \"left\": 541,\n",
+ " \"height\": 191,\n",
+ " \"width\": 330\n",
+ " }\n",
+ "}"
+ ],
"metadata": {
- "id": "U-yBDwZuBn_M"
+ "id": "gAIzsxEjLmhv"
},
- "source": [
- "Get the data row IDs that we just uploaded"
- ]
+ "execution_count": 607,
+ "outputs": []
},
{
"cell_type": "code",
- "execution_count": 10,
- "id": "nphpP2OmBnGQ",
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "nphpP2OmBnGQ",
- "outputId": "9bb863b3-c02e-4d9a-f184-633ec60cd523"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "datarow_ids: ['clbycrqin0kfb08wea4mp0jkb']\n"
- ]
- }
- ],
"source": [
- "# Data row ID(s) to send to the Model Run and to the project.\n",
- "datarow_ids = [dr.uid for dr in list(dataset.export_data_rows())]\n",
- "print(\"datarow_ids: \",datarow_ids)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "38FLeQKMBF9z",
+ "########## Polygon ##########\n",
+ "# Python Anotation \n",
+ "polygon_prediction = ObjectAnnotation(\n",
+ " name = \"polygon\", # must match your ontology feature's name \n",
+ " confidence = 0.5, \n",
+ "    value=Polygon( # Coordinates for the vertices of your polygon\n",
+ " points=[Point(x=1489.581,y=183.934),Point(x=2278.306,y=256.885),Point(x=2428.197,y=200.437),Point(x=2560.0,y=335.419),\n",
+ " Point(x=2557.386,y=503.165),Point(x=2320.596,y=503.103),Point(x=2156.083, y=628.943),Point(x=2161.111,y=785.519),\n",
+ " Point(x=2002.115, y=894.647),Point(x=1838.456,y=877.874),Point(x=1436.53,y=874.636),Point(x=1411.403,y=758.579),\n",
+ " Point(x=1353.853,y=751.74),Point(x=1345.264, y=453.461),Point(x=1426.011,y=421.129)]\n",
+ " ),\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# NDJSON\n",
+ "\n",
+ "polygon_prediction_ndjson = {\n",
+ " 'name': 'polygon',\n",
+ " 'confidence': 0.5,\n",
+ " 'polygon': [\n",
+ " {'x': 1489.581, 'y': 183.934},\n",
+ " {'x': 2278.306, 'y': 256.885},\n",
+ " {'x': 2428.197, 'y': 200.437},\n",
+ " {'x': 2560.0, 'y': 335.419},\n",
+ " {'x': 2557.386, 'y': 503.165},\n",
+ " {'x': 2320.596, 'y': 503.103},\n",
+ " {'x': 2156.083, 'y': 628.943},\n",
+ " {'x': 2161.111, 'y': 785.519},\n",
+ " {'x': 2002.115, 'y': 894.647},\n",
+ " {'x': 1838.456, 'y': 877.874},\n",
+ " {'x': 1436.53, 'y': 874.636},\n",
+ " {'x': 1411.403, 'y': 758.579},\n",
+ " {'x': 1353.853, 'y': 751.74},\n",
+ " {'x': 1345.264, 'y': 453.461},\n",
+ " {'x': 1426.011, 'y': 421.129},\n",
+ " {'x': 1489.581, 'y': 183.934}\n",
+ " ]\n",
+ "}"
+ ],
"metadata": {
- "id": "38FLeQKMBF9z"
+ "id": "jRwfE4MFz9Ph"
},
- "source": [
- "Send the data row to the Model Run"
- ]
+ "execution_count": 608,
+ "outputs": []
},
{
"cell_type": "code",
- "execution_count": 11,
- "id": "T1vk_EvzBI3u",
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "T1vk_EvzBI3u",
- "outputId": "e866fab9-2d89-4ea1-9b7f-ec1436834643"
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- "True"
- ]
- },
- "execution_count": 11,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
"source": [
- "model_run.upsert_data_rows(datarow_ids)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "5D7wBx41BJa9",
+ "####### Free text #######\n",
+ "# Confidence is not supported for text prediction\n",
+ "# Python annotation\n",
+ "text_annotation = ClassificationAnnotation(\n",
+ " name=\"free_text\", # must match your ontology feature's name\n",
+ " value=Text(answer=\"sample text\")\n",
+ ")\n",
+ "\n",
+ "# NDJSON\n",
+ "text_annotation_ndjson = {\n",
+ " 'name': 'free_text',\n",
+ " 'answer': 'sample text',\n",
+ "}"
+ ],
"metadata": {
- "id": "5D7wBx41BJa9"
+ "id": "PBB37YpWTiVR"
},
- "source": [
- "Send the data row to the project"
- ]
+ "execution_count": 609,
+ "outputs": []
},
{
"cell_type": "code",
- "execution_count": 12,
- "id": "yfNPsINLnPcO",
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "yfNPsINLnPcO",
- "outputId": "1ab9fbde-126c-4a21-da18-ea70c52c4d75"
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- ""
- ]
- },
- "execution_count": 12,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "project.create_batch(\n",
- " \"first-batch\", # Each batch in a project must have a unique name\n",
- " datarow_ids, # A list of data rows or data row ids\n",
- " 5 # priority between 1(Highest) - 5(lowest)\n",
- ")"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "8da94c48-72a5-4535-ab66-6d14b0b79aed",
- "metadata": {
- "id": "8da94c48-72a5-4535-ab66-6d14b0b79aed"
- },
"source": [
- "### Create the predictions payload\n",
- "We will upload it to the Model Run.\n",
+ "######### Segmentation mask #########\n",
"\n",
+ "# Python \n",
+ "# Identifying what values in the numpy array correspond to the mask annotation\n",
+ "color = (0, 0, 0)\n",
"\n",
- "It is recommended to use the Python SDK's annotation types when importing labels into Labelbox."
- ]
- },
- {
- "cell_type": "markdown",
- "id": "ee9b9191-6c0d-4cba-859b-e2e9a1b887c8",
+ "# convert a polygon to mask\n",
+ "im_height, im_width = 100,100 #need to provide the height and width of image.\n",
+ "mask_data = MaskData(arr=\n",
+ " polygon_prediction.value.draw(height=im_height,width=im_width,color=color))\n",
+ "\n",
+ "# convert a 2D array to 3D array\n",
+ "arr_2d = np.zeros((100,100), dtype='uint8')\n",
+ "mask_data = MaskData.from_2D_arr(arr_2d)\n",
+ "\n",
+ "# a 3D array where 3rd axis is RGB values.\n",
+ "mask_data = MaskData(arr= np.zeros([400,450,3],dtype='uint8'))\n",
+ "\n",
+ "mask_prediction = ObjectAnnotation(\n",
+ " name = \"mask\", # must match your ontology feature's name\n",
+ " confidence=0.5,\n",
+ " value=Mask(mask=mask_data, color=color),\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# NDJSON\n",
+ "mask_prediction_ndjson = {\n",
+ " 'name': 'mask',\n",
+ " 'confidence': 0.5,\n",
+ " 'classifications': [],\n",
+ " 'mask': {'instanceURI': 'https://storage.labelbox.com/cjhfn5y6s0pk507024nz1ocys%2F1d60856c-59b7-3060-2754-83f7e93e0d01-1?Expires=1666901963361&KeyName=labelbox-assets-key-3&Signature=t-2s2DB4YjFuWEFak0wxYqfBfZA',\n",
+ " 'colorRGB': (0, 0, 0)}\n",
+ "}\n",
+ "\n"
+ ],
"metadata": {
- "id": "ee9b9191-6c0d-4cba-859b-e2e9a1b887c8"
+ "id": "39vz-tYsz9Ry"
},
- "source": [
- "Object predictions"
- ]
+ "execution_count": 610,
+ "outputs": []
},
{
"cell_type": "code",
- "execution_count": 13,
- "id": "qzBqhV4Pv3yp",
- "metadata": {
- "id": "qzBqhV4Pv3yp"
- },
- "outputs": [],
"source": [
- "# Confidence scores are optional.\n",
- "# If no confidence is provided, \n",
- "# the prediction will be treated as if the confidence score equals 1\n",
+ "######## Point ########\n",
"\n",
- "point_prediction=ObjectAnnotation(\n",
- " value=Point(x=850,y=150), # Coordinates for this point annotation\n",
- " name=\"point\", # Name of the tool in your ontology\n",
- " confidence=0.5\n",
+ "# Python Annotation\n",
+ "point_prediction = ObjectAnnotation(\n",
+ " name = \"point\", # must match your ontology feature's name\n",
+ " confidence=0.5,\n",
+ " value = Point(x=1166.606, y=1441.768),\n",
")\n",
"\n",
- "box_prediction=ObjectAnnotation(\n",
- " value=Rectangle( # Coordinates for the top-left and bottom-right points of your bounding box, respectively\n",
- " start=Point(x=537,y=878),\n",
- " end=Point(x=832,y=1120)\n",
- " ),\n",
- " name=\"box\", # Name of the tool in your ontology\n",
- " confidence=0.5\n",
- ")\n",
- "\n",
- "polyline_prediction=ObjectAnnotation(\n",
- " value=Line( # Coordinates for the keypoints in your polyline\n",
- " points=[Point(x=2514.353, y=229.471),Point(x=2409.492, y=152.092),Point(x=2254.322, y=201.962),Point(x=2204.491, y=140.463),Point(x=2116.123, y=254.716),\n",
- " Point(x=1752.247, y=133.949),Point(x=1753.838, y=34.438),Point(x=1539.772, y=32.61),Point(x=1543.442, y=107.552),\n",
- " Point(x=1438.869, y=124.903),Point(x=1408.941, y=308.149),Point(x=1143.128, y=370.815),Point(x=822.067, y=219.007),\n",
- " Point(x=782.367, y=319.216),Point(x=620.273, y=314.408),Point(x=573.114, y=238.16),Point(x=327.559, y=218.251),\n",
- " Point(x=318.087, y=125.064),Point(x=226.557, y=117.286),Point(x=216.648, y=235.61),Point(x=40.929, y=306.412)]\n",
- " ),\n",
- " name=\"line\", # Name of the tool in your ontology\n",
- " confidence=0.5\n",
- ")\n",
- "\n",
- "polygon_prediction=ObjectAnnotation(\n",
- " value=Polygon( # Coordinates for the verticies of your polygon\n",
- " points=[Point(x=1389.581,y=183.934),Point(x=2178.306,y=256.885),Point(x=2328.197,y=200.437),Point(x=2460.0,y=335.419),\n",
- " Point(x=2457.386,y=503.165),Point(x=2220.596,y=503.103),Point(x=2056.083, y=628.943),Point(x=2061.111,y=785.519),\n",
- " Point(x=1902.115, y=894.647),Point(x=1738.456,y=877.874),Point(x=1336.53,y=874.636),Point(x=1311.403,y=758.579),\n",
- " Point(x=1253.853,y=751.74),Point(x=1245.264, y=453.461),Point(x=1326.011,y=421.129)]\n",
- " ),\n",
- " name=\"polygon\", # Name of the tool in your ontology\n",
- " confidence=0.5\n",
- ")\n",
"\n",
- "mask_prediction=ObjectAnnotation(\n",
- " value=Mask( # Numpy array representation of a segmentation mask with the corresponding values within the array that represent the segmentation mask\n",
- " mask=MaskData(\n",
- " arr=np.zeros([300,350,3],dtype='uint8') # Creating an example numpy array to represent a mask creating a square from pixels 0,0 to 128,128\n",
- " ),\n",
- " color=(0,0,0) # Identifying what values in the numpy array correspond to the mask annotation (since I made a 3-D array with all zeroes, here it's 0,0,0)\n",
- " ),\n",
- " name=\"mask\", # Name of the tool in your ontology\n",
- " confidence=0.5\n",
- ")"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "291f9c97-37ba-42f5-b8f0-e118bdc5c848",
+ "# NDJSON\n",
+ "point_prediction_ndjson = {\n",
+ " 'name': 'point',\n",
+ " 'confidence': 0.5,\n",
+ " 'classifications': [],\n",
+ " 'point': {'x': 1166.606, 'y': 1441.768}\n",
+ "}"
+ ],
"metadata": {
- "id": "291f9c97-37ba-42f5-b8f0-e118bdc5c848"
+ "id": "UelSiWN2z9Tg"
},
- "source": [
- "Classification predictions"
- ]
+ "execution_count": 611,
+ "outputs": []
},
{
"cell_type": "code",
- "execution_count": 14,
- "id": "f2RtQQPCymOB",
- "metadata": {
- "id": "f2RtQQPCymOB"
- },
- "outputs": [],
"source": [
- "# Confidence scores are optional.\n",
- "# If no confidence is provided, \n",
- "# the prediction will be treated as if the confidence score equals 1\n",
+ "###### Polyline ######\n",
"\n",
- "checklist_prediction=ClassificationAnnotation(\n",
- " value=Checklist(\n",
- " answer=[ # List of the checklist answers in your ontology\n",
- " ClassificationAnswer(\n",
- " name=\"first_checklist_answer\",\n",
- " confidence=0.5\n",
- " ),\n",
- " ClassificationAnswer(\n",
- " name=\"second_checklist_answer\",\n",
- " confidence=0.5\n",
- " )\n",
- " ]\n",
- " ), \n",
- " name=\"checklist\" # Name of the classification in your ontology\n",
- ")\n",
"\n",
- "radio_prediction=ClassificationAnnotation(\n",
- " value=Radio(\n",
- " answer=ClassificationAnswer(\n",
- " name=\"first_radio_answer\", # Name of the radio answer in your ontology\n",
- " confidence=0.5\n",
- " )\n",
- " ), \n",
- " name=\"radio\" # Name of the classification in your ontology\n",
+ "# Python Annotation \n",
+ "\n",
+ "polyline_prediction = ObjectAnnotation(\n",
+ " name = \"polyline\", # must match your ontology feature's name\n",
+ " # confidence=0.5, ## Not supported for python annotation tools\n",
+ " value=Line( # Coordinates for the keypoints in your polyline\n",
+ " points=[Point(x=2534.353, y=249.471),Point(x=2429.492, y=182.092),Point(x=2294.322, y=221.962),Point(x=2224.491, y=180.463),Point(x=2136.123, y=204.716),\n",
+ " Point(x=1712.247, y=173.949),Point(x=1703.838, y=84.438),Point(x=1579.772, y=82.61),Point(x=1583.442, y=167.552),\n",
+ " Point(x=1478.869, y=164.903),Point(x=1418.941, y=318.149),Point(x=1243.128, y=400.815),Point(x=1022.067, y=319.007),\n",
+ " Point(x=892.367, y=379.216),Point(x=670.273, y=364.408),Point(x=613.114, y=288.16),Point(x=377.559, y=238.251),\n",
+ " Point(x=368.087, y=185.064),Point(x=246.557, y=167.286),Point(x=236.648, y=285.61),Point(x=90.929, y=326.412)]\n",
+ " ),\n",
")\n",
"\n",
- "# Confidence is not supported for text prediction\n",
- "text_prediction=ClassificationAnnotation(\n",
- " value=Text( # String value for the text annotation\n",
- " answer=\"the answer to the text question\",\n",
- " ), \n",
- " name=\"text\" # Name of the classification in your ontology\n",
- ")\n"
- ]
+ "# NDJSON\n",
+ "polyline_prediction_ndjson = {\n",
+ " 'name': 'polyline',\n",
+ " 'confidence':0.5,\n",
+ " 'classifications': [],\n",
+ " 'line': [\n",
+ " {'x': 2534.353, 'y': 249.471},\n",
+ " {'x': 2429.492, 'y': 182.092},\n",
+ " {'x': 2294.322, 'y': 221.962},\n",
+ " {'x': 2224.491, 'y': 180.463},\n",
+ " {'x': 2136.123, 'y': 204.716},\n",
+ " {'x': 1712.247, 'y': 173.949},\n",
+ " {'x': 1703.838, 'y': 84.438},\n",
+ " {'x': 1579.772, 'y': 82.61},\n",
+ " {'x': 1583.442, 'y': 167.552},\n",
+ " {'x': 1478.869, 'y': 164.903},\n",
+ " {'x': 1418.941, 'y': 318.149},\n",
+ " {'x': 1243.128, 'y': 400.815},\n",
+ " {'x': 1022.067, 'y': 319.007},\n",
+ " {'x': 892.367, 'y': 379.216},\n",
+ " {'x': 670.273, 'y': 364.408},\n",
+ " {'x': 613.114, 'y': 288.16},\n",
+ " {'x': 377.559, 'y': 238.251},\n",
+ " {'x': 368.087, 'y': 185.064},\n",
+ " {'x': 246.557, 'y': 167.286},\n",
+ " {'x': 236.648, 'y': 285.61},\n",
+ " {'x': 90.929, 'y': 326.412}\n",
+ " ]\n",
+ "}\n"
+ ],
+ "metadata": {
+ "id": "mrjb8qY3z9VY"
+ },
+ "execution_count": 612,
+ "outputs": []
},
{
"cell_type": "markdown",
- "id": "15bd593b-509d-4114-af95-ae0be081c42d",
- "metadata": {
- "id": "15bd593b-509d-4114-af95-ae0be081c42d"
- },
"source": [
- "Create a Label object with all of the predictions created previously."
- ]
+ "## Step 1: Import data rows into Catalog"
+ ],
+ "metadata": {
+ "id": "U-o15yu9IPDo"
+ }
},
{
"cell_type": "code",
- "execution_count": 15,
- "id": "6d72fe25-ff7e-4e0a-94cf-095e4df73da0",
+ "source": [
+ "# send a sample image as batch to the project\n",
+ "test_img_url = {\n",
+ " \"row_data\": \"https://raw.githubusercontent.com/Labelbox/labelbox-python/develop/examples/assets/2560px-Kitano_Street_Kobe01s5s4110.jpg\",\n",
+ " \"global_key\": str(uuid.uuid4())\n",
+ "}\n",
+ "dataset = client.create_dataset(name=\"image_prediction_demo\")\n",
+ "data_row = dataset.create_data_row(test_img_url)\n",
+ "print(data_row)"
+ ],
"metadata": {
+ "id": "HjH9gTV8IBG9",
"colab": {
"base_uri": "https://localhost:8080/"
},
- "id": "6d72fe25-ff7e-4e0a-94cf-095e4df73da0",
- "outputId": "407eef2f-7931-45fe-f4f6-a098cf1a1dc6"
+ "outputId": "58f7cf62-5f30-468b-c83c-58b65a2d10c6"
},
+ "execution_count": 613,
"outputs": [
{
- "data": {
- "text/plain": [
- "{'uid': None,\n",
- " 'data': ImageData(im_bytes=None,file_path=None,url=None,arr=None),\n",
- " 'annotations': [ObjectAnnotation(confidence=0.5, name='point', feature_schema_id=None, extra={}, value=Point(extra={}, x=850.0, y=150.0), classifications=[]),\n",
- " ObjectAnnotation(confidence=0.5, name='box', feature_schema_id=None, extra={}, value=Rectangle(extra={}, start=Point(extra={}, x=537.0, y=878.0), end=Point(extra={}, x=832.0, y=1120.0)), classifications=[]),\n",
- " ClassificationAnnotation(name='text', feature_schema_id=None, extra={}, value=Text(answer='the answer to the text question')),\n",
- " ClassificationAnnotation(name='checklist', feature_schema_id=None, extra={}, value=Checklist(name='checklist', answer=[ClassificationAnswer(confidence=0.5, name='first_checklist_answer', feature_schema_id=None, extra={}, keyframe=None), ClassificationAnswer(confidence=0.5, name='second_checklist_answer', feature_schema_id=None, extra={}, keyframe=None)])),\n",
- " ClassificationAnnotation(name='radio', feature_schema_id=None, extra={}, value=Radio(answer=ClassificationAnswer(confidence=0.5, name='first_radio_answer', feature_schema_id=None, extra={}, keyframe=None))),\n",
- " ObjectAnnotation(confidence=0.5, name='polygon', feature_schema_id=None, extra={}, value=Polygon(extra={}, points=[Point(extra={}, x=1389.581, y=183.934), Point(extra={}, x=2178.306, y=256.885), Point(extra={}, x=2328.197, y=200.437), Point(extra={}, x=2460.0, y=335.419), Point(extra={}, x=2457.386, y=503.165), Point(extra={}, x=2220.596, y=503.103), Point(extra={}, x=2056.083, y=628.943), Point(extra={}, x=2061.111, y=785.519), Point(extra={}, x=1902.115, y=894.647), Point(extra={}, x=1738.456, y=877.874), Point(extra={}, x=1336.53, y=874.636), Point(extra={}, x=1311.403, y=758.579), Point(extra={}, x=1253.853, y=751.74), Point(extra={}, x=1245.264, y=453.461), Point(extra={}, x=1326.011, y=421.129), Point(extra={}, x=1389.581, y=183.934)]), classifications=[]),\n",
- " ObjectAnnotation(confidence=0.5, name='mask', feature_schema_id=None, extra={}, value=Mask(extra={}, mask=MaskData(im_bytes=None,file_path=None,url=https://storage.labelbox.com/cjhfn5y6s0pk507024nz1ocys%2Fd2543c9e-656c-b2ba-e9ff-602dff29148c-1?Expires=1671755760855&KeyName=labelbox-assets-key-3&Signature=DmHaF41rIGM-fL9TxixxX3k3ynU,arr=...), color=(0, 0, 0)), classifications=[]),\n",
- " ObjectAnnotation(confidence=None, name='line', feature_schema_id=None, extra={}, value=Line(extra={}, points=[Point(extra={}, x=2514.353, y=229.471), Point(extra={}, x=2409.492, y=152.092), Point(extra={}, x=2254.322, y=201.962), Point(extra={}, x=2204.491, y=140.463), Point(extra={}, x=2116.123, y=254.716), Point(extra={}, x=1752.247, y=133.949), Point(extra={}, x=1753.838, y=34.438), Point(extra={}, x=1539.772, y=32.61), Point(extra={}, x=1543.442, y=107.552), Point(extra={}, x=1438.869, y=124.903), Point(extra={}, x=1408.941, y=308.149), Point(extra={}, x=1143.128, y=370.815), Point(extra={}, x=822.067, y=219.007), Point(extra={}, x=782.367, y=319.216), Point(extra={}, x=620.273, y=314.408), Point(extra={}, x=573.114, y=238.16), Point(extra={}, x=327.559, y=218.251), Point(extra={}, x=318.087, y=125.064), Point(extra={}, x=226.557, y=117.286), Point(extra={}, x=216.648, y=235.61), Point(extra={}, x=40.929, y=306.412)]), classifications=[])],\n",
- " 'extra': {}}"
- ]
- },
- "execution_count": 15,
- "metadata": {},
- "output_type": "execute_result"
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "\n"
+ ]
}
- ],
- "source": [
- "# Create a Label object by identifying the applicavle data row in Labelbox and providing a list of annotations\n",
- "label_prediction = Label(\n",
- " data=ImageData(\n",
- " uid=data_row.uid),\n",
- " annotations = [\n",
- " point_prediction, box_prediction, text_prediction, \n",
- " checklist_prediction, radio_prediction,\n",
- " polygon_prediction, mask_prediction, polyline_prediction, \n",
- "\n",
- " ]\n",
- ")\n",
- "\n",
- "# Create urls to mask data for upload\n",
- "def signing_function(obj_bytes: bytes) -> str:\n",
- " url = client.upload_data(content=obj_bytes, sign=True)\n",
- " return url\n",
- "\n",
- "label_prediction.add_url_to_masks(signing_function)\n",
- "\n",
- "label_prediction.__dict__"
]
},
{
"cell_type": "markdown",
- "id": "gAva__YCCzjL",
- "metadata": {
- "id": "gAva__YCCzjL"
- },
"source": [
- "### Create the annotations payload\n",
- "We will upload it to the project.\n",
- "\n",
- "It is recommended to use the Python SDK's annotation types when importing labels into Labelbox."
- ]
- },
- {
- "cell_type": "markdown",
- "id": "wbhzltpNCzjL",
+ "## Step 2: Create/select an Ontology for your model predictions\n",
+ "Your project should have the correct ontology setup with all the tools and classifications supported for your annotations, and the tool names and classification instructions should match the name/instructions fields in your annotations to ensure the correct feature schemas are matched.\n"
+ ],
"metadata": {
- "id": "wbhzltpNCzjL"
- },
- "source": [
- "Object annotations"
- ]
+ "id": "oy0umzuNIceP"
+ }
},
{
"cell_type": "code",
- "execution_count": 16,
- "id": "apatq9StCzjM",
- "metadata": {
- "id": "apatq9StCzjM"
- },
- "outputs": [],
"source": [
- "point_annotation=ObjectAnnotation(\n",
- " value=Point(x=882,y=159), # Coordinates for this point annotation\n",
- " name=\"point\" # Name of the tool in your ontology\n",
- ")\n",
- "\n",
- "box_annotation=ObjectAnnotation(\n",
- " value=Rectangle( # Coordinates for the top-left and bottom-right points of your bounding box, respectively\n",
- " start=Point(x=557,y=898),\n",
- " end=Point(x=852,y=1140)\n",
- " ),\n",
- " name=\"box\" # Name of the tool in your ontology\n",
- ")\n",
- "\n",
- "polyline_annotation=ObjectAnnotation(\n",
- " value=Line( # Coordinates for the keypoints in your polyline\n",
- " points=[Point(x=2534.353, y=249.471),Point(x=2429.492, y=182.092),Point(x=2294.322, y=221.962),Point(x=2224.491, y=180.463),Point(x=2136.123, y=204.716),\n",
- " Point(x=1712.247, y=173.949),Point(x=1703.838, y=84.438),Point(x=1579.772, y=82.61),Point(x=1583.442, y=167.552),\n",
- " Point(x=1478.869, y=164.903),Point(x=1418.941, y=318.149),Point(x=1243.128, y=400.815),Point(x=1022.067, y=319.007),\n",
- " Point(x=892.367, y=379.216),Point(x=670.273, y=364.408),Point(x=613.114, y=288.16),Point(x=377.559, y=238.251),\n",
- " Point(x=368.087, y=185.064),Point(x=246.557, y=167.286),Point(x=236.648, y=285.61),Point(x=90.929, y=326.412)]\n",
+ "ontology_builder = OntologyBuilder(\n",
+ " classifications=[ # List of Classification objects\n",
+        "    Classification( # Radio classification given the name \"radio_question\" with two options: \"first_radio_answer\" and \"second_radio_answer\"\n",
+ " class_type=Classification.Type.RADIO, \n",
+ " instructions=\"radio_question\", \n",
+ " options=[\n",
+ " Option(value=\"first_radio_answer\"),\n",
+ " Option(value=\"second_radio_answer\")\n",
+ " ]\n",
" ),\n",
- " name=\"line\" # Name of the tool in your ontology\n",
- ")\n",
- "\n",
- "polygon_annotation=ObjectAnnotation(\n",
- " value=Polygon( # Coordinates for the verticies of your polygon\n",
- " points=[Point(x=1489.581,y=183.934),Point(x=2278.306,y=256.885),Point(x=2428.197,y=200.437),Point(x=2560.0,y=335.419),\n",
- " Point(x=2557.386,y=503.165),Point(x=2320.596,y=503.103),Point(x=2156.083, y=628.943),Point(x=2161.111,y=785.519),\n",
- " Point(x=2002.115, y=894.647),Point(x=1838.456,y=877.874),Point(x=1436.53,y=874.636),Point(x=1411.403,y=758.579),\n",
- " Point(x=1353.853,y=751.74),Point(x=1345.264, y=453.461),Point(x=1426.011,y=421.129)]\n",
+        "    Classification( # Checklist classification given the name \"checklist_question\" with two options: \"first_checklist_answer\" and \"second_checklist_answer\"\n",
+ " class_type=Classification.Type.CHECKLIST, \n",
+ " instructions=\"checklist_question\", \n",
+ " options=[\n",
+ " Option(value=\"first_checklist_answer\"),\n",
+ " Option(value=\"second_checklist_answer\") \n",
+ " ]\n",
+ " ), \n",
+        "    Classification( # Text classification given the name \"free_text\"\n",
+ " class_type=Classification.Type.TEXT,\n",
+ " instructions=\"free_text\"\n",
" ),\n",
- " name=\"polygon\" # Name of the tool in your ontology\n",
+ " Classification(\n",
+ " class_type=Classification.Type.RADIO, \n",
+ " instructions=\"nested_radio_question\",\n",
+ " options=[\n",
+ " Option(\"first_radio_answer\",\n",
+ " options=[\n",
+ " Classification(\n",
+ " class_type=Classification.Type.RADIO,\n",
+ " instructions=\"sub_radio_question\",\n",
+ " options=[Option(\"first_sub_radio_answer\")]\n",
+ " )\n",
+ " ]\n",
+ " )\n",
+ " ] \n",
+ " ),\n",
+ " Classification(\n",
+ " class_type=Classification.Type.CHECKLIST, \n",
+ " instructions=\"nested_checklist_question\",\n",
+ " options=[\n",
+ " Option(\"first_checklist_answer\",\n",
+ " options=[\n",
+ " Classification(\n",
+ " class_type=Classification.Type.CHECKLIST, \n",
+ " instructions=\"sub_checklist_question\", \n",
+ " options=[Option(\"first_sub_checklist_answer\")]\n",
+ " )\n",
+ " ]\n",
+ " )\n",
+ " ]\n",
+ " ), \n",
+ " ],\n",
+ " tools=[ # List of Tool objects\n",
+        "        Tool( # Bounding Box tool given the name \"bounding_box\"\n",
+ " tool=Tool.Type.BBOX, \n",
+ " name=\"bounding_box\"), \n",
+        "        Tool( # Bounding Box tool with a radio subclassification, given the name \"bbox_with_radio_subclass\"\n",
+ " tool=Tool.Type.BBOX, \n",
+ " name=\"bbox_with_radio_subclass\",\n",
+ " classifications=[\n",
+ " Classification(\n",
+ " class_type=Classification.Type.RADIO,\n",
+ " instructions=\"sub_radio_question\",\n",
+ " options=[\n",
+ " Option(value=\"first_sub_radio_answer\")\n",
+ " ]\n",
+ " ),\n",
+ " ]\n",
+ " ), \n",
+ " Tool( # Polygon tool given the name \"polygon\"\n",
+ " tool=Tool.Type.POLYGON, \n",
+ " name=\"polygon\"),\n",
+ " Tool( # Segmentation mask tool given the name \"mask\"\n",
+ " tool=Tool.Type.SEGMENTATION, \n",
+ " name=\"mask\"),\n",
+        "        Tool( # Point tool given the name \"point\"\n",
+ " tool=Tool.Type.POINT, \n",
+ " name=\"point\"), \n",
+        "        Tool( # Polyline tool given the name \"polyline\"\n",
+ " tool=Tool.Type.LINE, \n",
+ " name=\"polyline\")]\n",
")\n",
"\n",
- "mask_annotation=ObjectAnnotation(\n",
- " value=Mask( # Numpy array representation of a segmentation mask with the corresponding values within the array that represent the segmentation mask\n",
- " mask=MaskData(\n",
- " arr=np.zeros([400,450,3],dtype='uint8') # Creating an example numpy array to represent a mask creating a square from pixels 0,0 to 128,128\n",
- " ),\n",
- " color=(0,0,0) # Identifying what values in the numpy array correspond to the mask annotation (since I made a 3-D array with all zeroes, here it's 0,0,0)\n",
- " ),\n",
- " name=\"mask\" # Name of the tool in your ontology\n",
- ")"
- ]
+ "ontology = client.create_ontology(\"Image Prediction Import Demo\", ontology_builder.asdict(), media_type=MediaType.Image)"
+ ],
+ "metadata": {
+ "id": "Kt4XWWqgIiWk"
+ },
+ "execution_count": 614,
+ "outputs": []
},
{
"cell_type": "markdown",
- "id": "aqSYAaBiCzjN",
- "metadata": {
- "id": "aqSYAaBiCzjN"
- },
"source": [
- "Classification annotations"
- ]
+ "## Step 3: Create a Model and Model Run"
+ ],
+ "metadata": {
+ "id": "ZjN8jxHvIvHP"
+ }
},
{
"cell_type": "code",
- "execution_count": 17,
- "id": "9NAdIp6OCzjN",
+ "source": [
+ "# create Model\n",
+ "model = client.create_model(name=\"image_model_run_\" + str(uuid.uuid4()),\n",
+ " ontology_id=ontology.uid)\n",
+ "# create Model Run\n",
+ "model_run = model.create_model_run(\"iteration 1\")"
+ ],
"metadata": {
- "id": "9NAdIp6OCzjN"
+ "id": "8n-AvzdiOR6d"
},
- "outputs": [],
- "source": [
- "text_annotation=ClassificationAnnotation(\n",
- " value=Text( # String value for the text annotation\n",
- " answer=\"the answer to the text question\" \n",
- " ), \n",
- " name=\"text\" # Name of the classification in your ontology\n",
- ")\n",
- "\n",
- "checklist_annotation=ClassificationAnnotation(\n",
- " value=Checklist(\n",
- " answer=[ # List of the checklist answers in your ontology\n",
- " ClassificationAnswer(name=\"first_checklist_answer\"),\n",
- " ClassificationAnswer(name=\"second_checklist_answer\")\n",
- " ]\n",
- " ), \n",
- " name=\"checklist\" # Name of the classification in your ontology\n",
- ")\n",
- "\n",
- "radio_annotation=ClassificationAnnotation(\n",
- " value=Radio(\n",
- " answer=ClassificationAnswer(\n",
- " name=\"second_radio_answer\" # Name of the radio answer in your ontology\n",
- " )\n",
- " ), \n",
- " name=\"radio\" # Name of the classification in your ontology\n",
- ")"
- ]
+ "execution_count": 615,
+ "outputs": []
},
{
"cell_type": "markdown",
- "id": "zMcYJxpMCzjN",
- "metadata": {
- "id": "zMcYJxpMCzjN"
- },
"source": [
- "Create a Label object with all of the annotations created previously."
- ]
+ "## Step 4: Send data rows to the Model Run"
+ ],
+ "metadata": {
+ "id": "NX6L0axRJN5J"
+ }
},
{
"cell_type": "code",
- "execution_count": 18,
- "id": "OHofSRa1CzjN",
+ "source": [
+ "model_run.upsert_data_rows([data_row.uid])"
+ ],
"metadata": {
+ "id": "6sngCgIwJSae",
"colab": {
"base_uri": "https://localhost:8080/"
},
- "id": "OHofSRa1CzjN",
- "outputId": "c5d0c340-96b9-4200-f7d0-646fab1f7c1b"
+ "outputId": "5c546b9b-1336-4192-9815-8120e7b53122"
},
+ "execution_count": 616,
"outputs": [
{
+ "output_type": "execute_result",
"data": {
"text/plain": [
- "{'uid': None,\n",
- " 'data': ImageData(im_bytes=None,file_path=None,url=None,arr=None),\n",
- " 'annotations': [ObjectAnnotation(confidence=None, name='point', feature_schema_id=None, extra={}, value=Point(extra={}, x=882.0, y=159.0), classifications=[]),\n",
- " ObjectAnnotation(confidence=None, name='box', feature_schema_id=None, extra={}, value=Rectangle(extra={}, start=Point(extra={}, x=557.0, y=898.0), end=Point(extra={}, x=852.0, y=1140.0)), classifications=[]),\n",
- " ObjectAnnotation(confidence=None, name='line', feature_schema_id=None, extra={}, value=Line(extra={}, points=[Point(extra={}, x=2534.353, y=249.471), Point(extra={}, x=2429.492, y=182.092), Point(extra={}, x=2294.322, y=221.962), Point(extra={}, x=2224.491, y=180.463), Point(extra={}, x=2136.123, y=204.716), Point(extra={}, x=1712.247, y=173.949), Point(extra={}, x=1703.838, y=84.438), Point(extra={}, x=1579.772, y=82.61), Point(extra={}, x=1583.442, y=167.552), Point(extra={}, x=1478.869, y=164.903), Point(extra={}, x=1418.941, y=318.149), Point(extra={}, x=1243.128, y=400.815), Point(extra={}, x=1022.067, y=319.007), Point(extra={}, x=892.367, y=379.216), Point(extra={}, x=670.273, y=364.408), Point(extra={}, x=613.114, y=288.16), Point(extra={}, x=377.559, y=238.251), Point(extra={}, x=368.087, y=185.064), Point(extra={}, x=246.557, y=167.286), Point(extra={}, x=236.648, y=285.61), Point(extra={}, x=90.929, y=326.412)]), classifications=[]),\n",
- " ObjectAnnotation(confidence=None, name='polygon', feature_schema_id=None, extra={}, value=Polygon(extra={}, points=[Point(extra={}, x=1489.581, y=183.934), Point(extra={}, x=2278.306, y=256.885), Point(extra={}, x=2428.197, y=200.437), Point(extra={}, x=2560.0, y=335.419), Point(extra={}, x=2557.386, y=503.165), Point(extra={}, x=2320.596, y=503.103), Point(extra={}, x=2156.083, y=628.943), Point(extra={}, x=2161.111, y=785.519), Point(extra={}, x=2002.115, y=894.647), Point(extra={}, x=1838.456, y=877.874), Point(extra={}, x=1436.53, y=874.636), Point(extra={}, x=1411.403, y=758.579), Point(extra={}, x=1353.853, y=751.74), Point(extra={}, x=1345.264, y=453.461), Point(extra={}, x=1426.011, y=421.129), Point(extra={}, x=1489.581, y=183.934)]), classifications=[]),\n",
- " ObjectAnnotation(confidence=None, name='mask', feature_schema_id=None, extra={}, value=Mask(extra={}, mask=MaskData(im_bytes=None,file_path=None,url=https://storage.labelbox.com/cjhfn5y6s0pk507024nz1ocys%2F54fa3928-8aac-c97e-b0ad-353e0ca84e51-1?Expires=1671755761195&KeyName=labelbox-assets-key-3&Signature=zf-AAyerLdXdpYOTq-djtDyBWDk,arr=...), color=(0, 0, 0)), classifications=[]),\n",
- " ClassificationAnnotation(name='text', feature_schema_id=None, extra={}, value=Text(answer='the answer to the text question')),\n",
- " ClassificationAnnotation(name='checklist', feature_schema_id=None, extra={}, value=Checklist(name='checklist', answer=[ClassificationAnswer(confidence=None, name='first_checklist_answer', feature_schema_id=None, extra={}, keyframe=None), ClassificationAnswer(confidence=None, name='second_checklist_answer', feature_schema_id=None, extra={}, keyframe=None)])),\n",
- " ClassificationAnnotation(name='radio', feature_schema_id=None, extra={}, value=Radio(answer=ClassificationAnswer(confidence=None, name='second_radio_answer', feature_schema_id=None, extra={}, keyframe=None)))],\n",
- " 'extra': {}}"
+ "True"
]
},
- "execution_count": 18,
"metadata": {},
- "output_type": "execute_result"
+ "execution_count": 616
}
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Step 5. Create the predictions payload\n",
+ "\n",
+ "Create the annotations payload using the snippets of code in [Supported python annotation types and NDJSON](https://colab.research.google.com/drive/14ZdYPz8s58D07AOUjKuJ54rFjM0JVNlf#scrollTo=OePiibbed0nG&line=1&uniqifier=1).\n",
+ "\n",
+        "The resulting label_ndjson should have exactly the same content for annotations that are supported by both (with the exception of the UUID strings that are generated)."
],
+ "metadata": {
+ "id": "6FZyvnrqSGuc"
+ }
+ },
+ {
+ "cell_type": "code",
"source": [
- "# Create a Label object by identifying the applicavle data row in Labelbox and providing a list of annotations\n",
- "label_annotation = Label(\n",
- " data=ImageData(\n",
- " uid=data_row.uid),\n",
+ "# Create a Label for predictions\n",
+ "label_prediction = Label(\n",
+ " data=ImageData(uid=data_row.uid),\n",
" annotations = [\n",
- " point_annotation, box_annotation, polyline_annotation, polygon_annotation, mask_annotation,\n",
- " text_annotation, checklist_annotation, radio_annotation\n",
- " ]\n",
+ " radio_prediction,\n",
+ " checklist_prediction, \n",
+ " bbox_prediction, \n",
+ " bbox_with_radio_subclass_prediction, \n",
+ " polygon_prediction, \n",
+ " mask_prediction, \n",
+ " point_prediction,\n",
+ " nested_radio_prediction\n",
+ " ]\n",
")\n",
"\n",
- "# Create urls to mask data for upload\n",
- "def signing_function(obj_bytes: bytes) -> str:\n",
- " url = client.upload_data(content=obj_bytes, sign=True)\n",
- " return url\n",
- "\n",
- "label_annotation.add_url_to_masks(signing_function)\n",
+ "# Create a label list \n",
+ "label_list_prediction = [label_prediction]\n",
"\n",
- "label_annotation.__dict__"
- ]
+ "# Convert the prediction label from a Labelbox class object to the underlying NDJSON format required for upload - uploads can be directly built in this syntax as well\n",
+ "ndjson_prediction = list(NDJsonConverter.serialize(label_list_prediction))"
+ ],
+ "metadata": {
+ "id": "zv2OLTXKSGWv"
+ },
+ "execution_count": 617,
+ "outputs": []
},
{
"cell_type": "markdown",
- "id": "KIEvdVzTGL09",
+ "source": [
+ "If using NDJSON"
+ ],
+ "metadata": {
+ "id": "HaIjOzZggv56"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "\n",
+ "ndjson_prediction_method2 = []\n",
+ "for annot in [\n",
+ " radio_prediction_ndjson,\n",
+ " checklist_prediction_ndjson, \n",
+ " bbox_prediction_ndjson, \n",
+ " bbox_with_radio_subclass_prediction_ndjson, \n",
+ " polygon_prediction_ndjson, \n",
+ " mask_prediction_ndjson, \n",
+ " point_prediction_ndjson,\n",
+ " polyline_prediction_ndjson,\n",
+ " text_annotation_ndjson, \n",
+ " nested_radio_prediction_ndjson,\n",
+ " nested_checklist_prediction_ndjson\n",
+ " \n",
+ "]:\n",
+ " annot.update({\n",
+ " 'uuid': str(uuid.uuid4()),\n",
+ " 'dataRow': {'id': data_row.uid},\n",
+ " })\n",
+ " ndjson_prediction_method2.append(annot)"
+ ],
"metadata": {
- "id": "KIEvdVzTGL09"
+ "id": "F-Y7sSyAV3tn"
},
+ "execution_count": 618,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
"source": [
- "### Upload the annotations payload to the project"
- ]
+ "## Step 6. Upload the predictions payload to the Model Run "
+ ],
+ "metadata": {
+ "id": "viFHCnBeTD1Y"
+ }
},
{
"cell_type": "code",
- "execution_count": 19,
- "id": "d_95gQMAGJXq",
+ "source": [
+ "# Upload the prediction label to the Model Run\n",
+ "upload_job_prediction = model_run.add_predictions(\n",
+ " name=\"prediction_upload_job\"+str(uuid.uuid4()),\n",
+ " predictions=ndjson_prediction_method2)\n",
+ "\n",
+ "# Errors will appear for annotation uploads that failed.\n",
+ "print(\"Errors:\", upload_job_prediction.errors)"
+ ],
"metadata": {
+ "id": "0VN3ZRzyb4cl",
"colab": {
"base_uri": "https://localhost:8080/"
},
- "id": "d_95gQMAGJXq",
- "outputId": "cab6f910-4bc7-452b-f5b5-ee0eb6f54091"
+ "outputId": "9dedec2e-f24a-4800-8b54-48af9d293c07"
},
+ "execution_count": 619,
"outputs": [
{
- "name": "stdout",
"output_type": "stream",
+ "name": "stdout",
"text": [
"Errors: []\n"
]
}
- ],
- "source": [
- "## Create a label list \n",
- "label_list_annotation = [label_annotation]\n",
- "\n",
- "# Convert the annotation label from a Labelbox class object to the underlying NDJSON format required for upload - uploads can be directly built in this syntax as well\n",
- "ndjson_annotation = list(NDJsonConverter.serialize(label_list_annotation))\n",
- "\n",
- "# Upload the annotation label to the project using Label Import\n",
- "upload_job_annotation = LabelImport.create_from_objects(\n",
- " client = client,\n",
- " project_id = project.uid,\n",
- " name=\"annotation_import_job\",\n",
- " labels=ndjson_annotation)\n",
- "\n",
- "# This will provide information only after the upload_job is complete, so we do not need to worry about having to rerun\n",
- "upload_job_annotation.wait_until_done()\n",
- "# Errors will appear for annotation uploads that failed.\n",
- "print(\"Errors:\", upload_job_annotation.errors)"
]
},
{
"cell_type": "markdown",
- "id": "tcQpab5_GR72",
- "metadata": {
- "id": "tcQpab5_GR72"
- },
"source": [
- "### Send the annotations to the Model Run"
- ]
+ "## Step 7: Send annotations to a model run\n",
+ "To visualize both annotations and predictions in the model run we will create a project with ground truth annotations. \n",
+ "To send annotations to a Model Run, we must first import them into a project, create a label payload and then send them to the Model Run."
+ ],
+ "metadata": {
+ "id": "T-ZHWWI3JgmX"
+ }
},
{
"cell_type": "markdown",
- "id": "5I45AW4OHJvq",
- "metadata": {
- "id": "5I45AW4OHJvq"
- },
"source": [
- "Get the label IDs that we just uploaded\n"
- ]
+ "##### 7.1. Create a labelbox project"
+ ],
+ "metadata": {
+ "id": "CYRiqHr2O_aL"
+ }
},
{
"cell_type": "code",
- "execution_count": 20,
- "id": "wGwk8s0SHiIg",
+ "source": [
+ "# Create a Labelbox project\n",
+ "project = client.create_project(name=\"image_prediction_demo\", \n",
+ " queue_mode=QueueMode.Batch,\n",
+ " # Quality Settings setup \n",
+ " auto_audit_percentage=1,\n",
+ " auto_audit_number_of_labels=1,\n",
+ " media_type=MediaType.Image)\n",
+ "project.setup_editor(ontology)"
+ ],
"metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "wGwk8s0SHiIg",
- "outputId": "59d5ec00-6830-48f0-e5d0-655a35393ec8"
+ "id": "jEtoDiDrPFvI"
},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "label_ids: ['clbycrz8s000l0g3ejhyi3w4g']\n"
- ]
- }
- ],
+ "execution_count": 620,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
"source": [
- "# get the labels id from the project\n",
- "label_ids = [x['ID'] for x in project.export_labels(download=True)]\n",
- "print(\"label_ids: \",label_ids)"
- ]
+ "##### 7.2. Create a batch to send to the project "
+ ],
+ "metadata": {
+ "id": "7FEyC-nBPPuD"
+ }
},
{
"cell_type": "code",
- "execution_count": 21,
- "id": "nZVuxM5yGR73",
+ "source": [
+ "project.create_batch(\n",
+ " \"batch_predictions_demo\", # Each batch in a project must have a unique name\n",
+ " dataset.export_data_rows(), # A list of data rows or data row ids\n",
+ " 5 # priority between 1(Highest) - 5(lowest)\n",
+ ")"
+ ],
"metadata": {
+ "id": "WRr5tdVEPXXy",
"colab": {
"base_uri": "https://localhost:8080/"
},
- "id": "nZVuxM5yGR73",
- "outputId": "bc8e870e-5312-4b22-ef44-153548335894"
+ "outputId": "d7d38c81-42f6-40a4-f73f-2bf3d6de54b2"
},
+ "execution_count": 621,
"outputs": [
{
+ "output_type": "execute_result",
"data": {
"text/plain": [
- "True"
+ ""
]
},
- "execution_count": 21,
"metadata": {},
- "output_type": "execute_result"
+ "execution_count": 621
}
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "##### 7.3 Create the annotations payload"
],
+ "metadata": {
+ "id": "FTGAI730UlZ3"
+ }
+ },
+ {
+ "cell_type": "code",
"source": [
- "model_run.upsert_labels(label_ids)"
- ]
+ "########### Annotations ###########\n",
+ "radio_annotation_ndjson = {\n",
+ " 'name': 'radio_question',\n",
+ " 'answer': {'name': 'second_radio_answer'}\n",
+ "} \n",
+ "\n",
+ "nested_radio_annotation_ndjson = {\n",
+ " \"name\": \"nested_radio_question\",\n",
+ " \"answer\": {\"name\": \"first_radio_answer\"},\n",
+ " \"classifications\" : [\n",
+ " {'name': 'sub_radio_question', 'answer': {'name': 'first_sub_radio_answer'}}\n",
+ " ]\n",
+ "}\n",
+ "\n",
+ "checklist_annotation_ndjson = {\n",
+ " 'name': 'checklist_question',\n",
+ " 'answer': [\n",
+ " {'name': 'first_checklist_answer'},\n",
+ " {'name': 'second_checklist_answer'}\n",
+ " ]\n",
+ "}\n",
+ "\n",
+ "bbox_annotation_ndjson = {\n",
+ " 'name': 'bounding_box',\n",
+ " 'bbox': {\n",
+ " \"top\": 977,\n",
+ " \"left\": 1690,\n",
+ " \"height\": 330,\n",
+ " \"width\": 225\n",
+ " }\n",
+ "}\n",
+ "\n",
+ "bbox_with_radio_subclass_ndjson = {\n",
+ " \"name\": \"bbox_with_radio_subclass\", \n",
+ " \"classifications\": [{\n",
+ " \"name\": \"sub_radio_question\",\n",
+ " \"answer\": \n",
+ " { \"name\":\"first_sub_radio_answer\" }\n",
+ " \n",
+ " }],\n",
+ " \"bbox\": {\n",
+ " \"top\": 933,\n",
+ " \"left\": 541,\n",
+ " \"height\": 191,\n",
+ " \"width\": 330\n",
+ " }\n",
+ "}\n",
+ "\n",
+ "polygon_annotation_ndjson = {\n",
+ " 'name': 'polygon',\n",
+ " 'polygon': [\n",
+ " {'x': 1489.581, 'y': 183.934},\n",
+ " {'x': 2278.306, 'y': 256.885},\n",
+ " {'x': 2428.197, 'y': 200.437},\n",
+ " {'x': 2560.0, 'y': 335.419},\n",
+ " {'x': 2557.386, 'y': 503.165},\n",
+ " {'x': 2320.596, 'y': 503.103},\n",
+ " {'x': 2156.083, 'y': 628.943},\n",
+ " {'x': 2161.111, 'y': 785.519},\n",
+ " {'x': 2002.115, 'y': 894.647},\n",
+ " {'x': 1838.456, 'y': 877.874},\n",
+ " {'x': 1436.53, 'y': 874.636},\n",
+ " {'x': 1411.403, 'y': 758.579},\n",
+ " {'x': 1353.853, 'y': 751.74},\n",
+ " {'x': 1345.264, 'y': 453.461},\n",
+ " {'x': 1426.011, 'y': 421.129},\n",
+ " {'x': 1489.581, 'y': 183.934}\n",
+ " ]\n",
+ "}\n",
+ "\n",
+ "mask_annotation_ndjson = {\n",
+ " 'name': 'mask',\n",
+ " 'classifications': [],\n",
+ " 'mask': {'instanceURI': 'https://storage.labelbox.com/cjhfn5y6s0pk507024nz1ocys%2F1d60856c-59b7-3060-2754-83f7e93e0d01-1?Expires=1666901963361&KeyName=labelbox-assets-key-3&Signature=t-2s2DB4YjFuWEFak0wxYqfBfZA',\n",
+ " 'colorRGB': (0, 0, 0)}\n",
+ "}\n",
+ "\n",
+ "\n",
+ "point_annotation_ndjson = {\n",
+ " 'name': 'point',\n",
+ " 'classifications': [],\n",
+ " 'point': {'x': 1166.606, 'y': 1441.768}\n",
+ "}\n",
+ "\n",
+ "point_annotation_ndjson = {\n",
+ " 'name': 'point',\n",
+ " 'classifications': [],\n",
+ " 'point': {'x': 1166.606, 'y': 1441.768}\n",
+ "}\n",
+ "\n",
+ "polyline_annotation_ndjson = {\n",
+ " 'name': 'polyline',\n",
+ " 'classifications': [],\n",
+ " 'line': [\n",
+ " {'x': 2534.353, 'y': 249.471},\n",
+ " {'x': 2429.492, 'y': 182.092},\n",
+ " {'x': 2294.322, 'y': 221.962},\n",
+ " {'x': 2224.491, 'y': 180.463},\n",
+ " {'x': 2136.123, 'y': 204.716},\n",
+ " {'x': 1712.247, 'y': 173.949},\n",
+ " {'x': 1703.838, 'y': 84.438},\n",
+ " {'x': 1579.772, 'y': 82.61},\n",
+ " {'x': 1583.442, 'y': 167.552},\n",
+ " {'x': 1478.869, 'y': 164.903},\n",
+ " {'x': 1418.941, 'y': 318.149},\n",
+ " {'x': 1243.128, 'y': 400.815},\n",
+ " {'x': 1022.067, 'y': 319.007},\n",
+ " {'x': 892.367, 'y': 379.216},\n",
+ " {'x': 670.273, 'y': 364.408},\n",
+ " {'x': 613.114, 'y': 288.16},\n",
+ " {'x': 377.559, 'y': 238.251},\n",
+ " {'x': 368.087, 'y': 185.064},\n",
+ " {'x': 246.557, 'y': 167.286},\n",
+ " {'x': 236.648, 'y': 285.61},\n",
+ " {'x': 90.929, 'y': 326.412}\n",
+ " ]\n",
+ "}\n",
+ "\n",
+ "nested_checklist_prediction_ndjson = {\n",
+ " \"name\": \"nested_checklist_question\",\n",
+ " \"answer\": [{\n",
+ " \"name\": \"first_checklist_answer\", \n",
+ " \"classifications\" : [\n",
+ " {\n",
+ " \"name\": \"sub_checklist_question\", \n",
+ " \"answer\": {\"name\": \"first_sub_checklist_answer\"}\n",
+ " } \n",
+ " ] \n",
+ " }]\n",
+ "}\n"
+ ],
+ "metadata": {
+ "id": "A8_HVvu9Uvfl"
+ },
+ "execution_count": 622,
+ "outputs": []
},
{
"cell_type": "markdown",
- "id": "mFlJY439GSHl",
+ "source": [
+ "##### 7.4. Create the label object"
+ ],
+ "metadata": {
+ "id": "8QwmguFvPltl"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+        "# Build the NDJSON annotations payload by attaching a UUID and the applicable data row id to each annotation\n",
+ "ndjson_annotation = []\n",
+ "for annot in [\n",
+ " radio_annotation_ndjson, \n",
+ " checklist_annotation_ndjson, \n",
+ " bbox_annotation_ndjson, \n",
+ " bbox_with_radio_subclass_ndjson, \n",
+ " polygon_annotation_ndjson, \n",
+ " mask_annotation_ndjson, \n",
+ " point_annotation_ndjson, \n",
+ " polyline_annotation_ndjson,\n",
+ " nested_radio_annotation_ndjson,\n",
+ " nested_checklist_prediction_ndjson\n",
+ "]:\n",
+ " annot.update({\n",
+ " 'uuid': str(uuid.uuid4()),\n",
+ " 'dataRow': {'id': data_row.uid},\n",
+ " })\n",
+ " ndjson_annotation.append(annot) \n",
+ "\n"
+ ],
"metadata": {
- "id": "mFlJY439GSHl"
+ "id": "9gD_alThQA3G"
},
+ "execution_count": 623,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
"source": [
- "### Upload the predictions payload to the Model Run"
- ]
+ "##### 7.5. Upload annotations to the project using Label Import"
+ ],
+ "metadata": {
+ "id": "nGVNQlvPQ-kF"
+ }
},
{
"cell_type": "code",
- "execution_count": 22,
- "id": "HFgB6qaSGSHm",
+ "source": [
+ "upload_job_annotation = LabelImport.create_from_objects(\n",
+ " client = client,\n",
+ " project_id = project.uid,\n",
+ " name=\"annotation_import_\" + str(uuid.uuid4()),\n",
+ " labels=ndjson_annotation)\n",
+ "\n",
+ "upload_job_annotation.wait_until_done()\n",
+ "# Errors will appear for annotation uploads that failed.\n",
+ "print(\"Errors:\", upload_job_annotation.errors)\n"
+ ],
"metadata": {
+ "id": "HYh9AzrlRYX-",
"colab": {
"base_uri": "https://localhost:8080/"
},
- "id": "HFgB6qaSGSHm",
- "outputId": "6c521aa5-9fb9-49b6-c0f7-4503f2070c7d"
+ "outputId": "ea4cce4e-dab0-4ee6-857d-f5c3deff7e6a"
},
+ "execution_count": 624,
"outputs": [
{
- "name": "stdout",
"output_type": "stream",
+ "name": "stdout",
"text": [
"Errors: []\n"
]
}
- ],
- "source": [
- "## Create a label list \n",
- "label_list_prediction = [label_prediction]\n",
- "\n",
- "# Convert the prediction label from a Labelbox class object to the underlying NDJSON format required for upload - uploads can be directly built in this syntax as well\n",
- "ndjson_prediction = list(NDJsonConverter.serialize(label_list_prediction))\n",
- "\n",
- "# Upload the prediction label to the Model Run\n",
- "upload_job_prediction = model_run.add_predictions(\n",
- " name=\"prediction_upload_job\"+str(uuid.uuid4()),\n",
- " predictions=ndjson_prediction)\n",
- "\n",
- "# Errors will appear for annotation uploads that failed.\n",
- "print(\"Errors:\", upload_job_prediction.errors)\n"
]
},
{
"cell_type": "markdown",
- "id": "OhgYk6byutP4",
- "metadata": {
- "id": "OhgYk6byutP4"
- },
"source": [
- "## Cleanup "
- ]
+ "##### 7.6 Send the annotations to the Model Run"
+ ],
+ "metadata": {
+ "id": "Y3rgM-5cRrxM"
+ }
},
{
"cell_type": "code",
- "execution_count": 23,
- "id": "_9FDSkrhur2q",
+ "source": [
+ "# get the labels id from the project\n",
+ "label_ids = [x['ID'] for x in project.export_labels(download=True)]\n",
+ "model_run.upsert_labels(label_ids)"
+ ],
"metadata": {
- "id": "_9FDSkrhur2q"
+ "id": "i2BrS8CcSBzo",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "outputId": "9c40b5e4-3828-4d65-895d-641e5024aff7"
},
- "outputs": [],
- "source": [
- "# mal_project.delete()\n",
- "# li_project.delete()\n",
- "# dataset.delete()"
+ "execution_count": 625,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "True"
+ ]
+ },
+ "metadata": {},
+ "execution_count": 625
+ }
]
- }
- ],
- "metadata": {
- "colab": {
- "provenance": []
},
- "kernelspec": {
- "display_name": "Python 3.9.2 64-bit",
- "language": "python",
- "name": "python3"
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Optional deletions for cleanup \n"
+ ],
+ "metadata": {
+ "id": "DMtOfWWDWFbJ"
+ }
},
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
+ {
+ "cell_type": "code",
+ "source": [
+ "#upload_job\n",
+ "# project.delete()\n",
+ "# dataset.delete()"
+ ],
+ "metadata": {
+ "id": "aAhkyvJlWK1p"
},
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.2"
- },
- "vscode": {
- "interpreter": {
- "hash": "397704579725e15f5c7cb49fe5f0341eb7531c82d19f2c29d197e8b64ab5776b"
- }
+ "execution_count": 626,
+ "outputs": []
}
- },
- "nbformat": 4,
- "nbformat_minor": 5
+ ]
}
From ecd682e6b7db599c2f589c6153334779e75f7913 Mon Sep 17 00:00:00 2001
From: Andrea Ovalle <74880762+ovalle15@users.noreply.github.com>
Date: Mon, 23 Jan 2023 17:42:49 -0500
Subject: [PATCH 2/5] Uncomment confidence score for polyline.
A patch to allow for confidence score in python annotation tools for polyline has been deployed.
---
.../prediction_upload/image_predictions.ipynb | 84 +++++++++----------
1 file changed, 42 insertions(+), 42 deletions(-)
diff --git a/examples/prediction_upload/image_predictions.ipynb b/examples/prediction_upload/image_predictions.ipynb
index 2c563c680..881b248aa 100644
--- a/examples/prediction_upload/image_predictions.ipynb
+++ b/examples/prediction_upload/image_predictions.ipynb
@@ -96,7 +96,7 @@
"metadata": {
"id": "cm8xMaLbGb7v"
},
- "execution_count": 600,
+ "execution_count": 627,
"outputs": []
},
{
@@ -119,7 +119,7 @@
"metadata": {
"id": "NIq-6M9kHKSs"
},
- "execution_count": 601,
+ "execution_count": 628,
"outputs": []
},
{
@@ -141,7 +141,7 @@
"metadata": {
"id": "z7ZLKLYLHP__"
},
- "execution_count": 602,
+ "execution_count": 629,
"outputs": []
},
{
@@ -173,7 +173,7 @@
"metadata": {
"id": "v5wL6oojz9Ge"
},
- "execution_count": 603,
+ "execution_count": 630,
"outputs": []
},
{
@@ -229,7 +229,7 @@
"metadata": {
"id": "I75K-wx7_sDs"
},
- "execution_count": 604,
+ "execution_count": 631,
"outputs": []
},
{
@@ -266,7 +266,7 @@
"metadata": {
"id": "b2UjSoYez9I1"
},
- "execution_count": 605,
+ "execution_count": 632,
"outputs": []
},
{
@@ -301,7 +301,7 @@
"metadata": {
"id": "xCU4JRP0z9Nh"
},
- "execution_count": 606,
+ "execution_count": 633,
"outputs": []
},
{
@@ -346,7 +346,7 @@
"metadata": {
"id": "gAIzsxEjLmhv"
},
- "execution_count": 607,
+ "execution_count": 634,
"outputs": []
},
{
@@ -394,7 +394,7 @@
"metadata": {
"id": "jRwfE4MFz9Ph"
},
- "execution_count": 608,
+ "execution_count": 635,
"outputs": []
},
{
@@ -417,7 +417,7 @@
"metadata": {
"id": "PBB37YpWTiVR"
},
- "execution_count": 609,
+ "execution_count": 636,
"outputs": []
},
{
@@ -461,7 +461,7 @@
"metadata": {
"id": "39vz-tYsz9Ry"
},
- "execution_count": 610,
+ "execution_count": 637,
"outputs": []
},
{
@@ -488,7 +488,7 @@
"metadata": {
"id": "UelSiWN2z9Tg"
},
- "execution_count": 611,
+ "execution_count": 638,
"outputs": []
},
{
@@ -501,7 +501,7 @@
"\n",
"polyline_prediction = ObjectAnnotation(\n",
" name = \"polyline\", # must match your ontology feature's name\n",
- " # confidence=0.5, ## Not supported for python annotation tools\n",
+ " confidence=0.5, ## Confidence scores are now supported for polyline python annotation tools\n",
" value=Line( # Coordinates for the keypoints in your polyline\n",
" points=[Point(x=2534.353, y=249.471),Point(x=2429.492, y=182.092),Point(x=2294.322, y=221.962),Point(x=2224.491, y=180.463),Point(x=2136.123, y=204.716),\n",
" Point(x=1712.247, y=173.949),Point(x=1703.838, y=84.438),Point(x=1579.772, y=82.61),Point(x=1583.442, y=167.552),\n",
@@ -544,7 +544,7 @@
"metadata": {
"id": "mrjb8qY3z9VY"
},
- "execution_count": 612,
+ "execution_count": 639,
"outputs": []
},
{
@@ -573,24 +573,24 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "58f7cf62-5f30-468b-c83c-58b65a2d10c6"
+ "outputId": "d7a3afdd-f2e7-4602-a0c7-0c6cbe2938f2"
},
- "execution_count": 613,
+ "execution_count": 640,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\n"
]
}
@@ -698,7 +698,7 @@
"metadata": {
"id": "Kt4XWWqgIiWk"
},
- "execution_count": 614,
+ "execution_count": 641,
"outputs": []
},
{
@@ -722,7 +722,7 @@
"metadata": {
"id": "8n-AvzdiOR6d"
},
- "execution_count": 615,
+ "execution_count": 642,
"outputs": []
},
{
@@ -744,9 +744,9 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "5c546b9b-1336-4192-9815-8120e7b53122"
+ "outputId": "88d69ffb-77f0-4dde-abcb-1addd1ae5ab5"
},
- "execution_count": 616,
+ "execution_count": 643,
"outputs": [
{
"output_type": "execute_result",
@@ -756,7 +756,7 @@
]
},
"metadata": {},
- "execution_count": 616
+ "execution_count": 643
}
]
},
@@ -800,7 +800,7 @@
"metadata": {
"id": "zv2OLTXKSGWv"
},
- "execution_count": 617,
+ "execution_count": 644,
"outputs": []
},
{
@@ -840,7 +840,7 @@
"metadata": {
"id": "F-Y7sSyAV3tn"
},
- "execution_count": 618,
+ "execution_count": 645,
"outputs": []
},
{
@@ -868,9 +868,9 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "9dedec2e-f24a-4800-8b54-48af9d293c07"
+ "outputId": "8eccb11b-fb9e-41d7-a823-7b70b842ffef"
},
- "execution_count": 619,
+ "execution_count": 646,
"outputs": [
{
"output_type": "stream",
@@ -916,7 +916,7 @@
"metadata": {
"id": "jEtoDiDrPFvI"
},
- "execution_count": 620,
+ "execution_count": 647,
"outputs": []
},
{
@@ -942,19 +942,19 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "d7d38c81-42f6-40a4-f73f-2bf3d6de54b2"
+ "outputId": "bd2f36fa-6b97-4b08-b4ce-fe48802fcdf3"
},
- "execution_count": 621,
+ "execution_count": 648,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
- ""
+ ""
]
},
"metadata": {},
- "execution_count": 621
+ "execution_count": 648
}
]
},
@@ -1104,7 +1104,7 @@
"metadata": {
"id": "A8_HVvu9Uvfl"
},
- "execution_count": 622,
+ "execution_count": 649,
"outputs": []
},
{
@@ -1143,7 +1143,7 @@
"metadata": {
"id": "9gD_alThQA3G"
},
- "execution_count": 623,
+ "execution_count": 650,
"outputs": []
},
{
@@ -1173,9 +1173,9 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "ea4cce4e-dab0-4ee6-857d-f5c3deff7e6a"
+ "outputId": "88d42583-2892-490f-8497-3016bdeacf44"
},
- "execution_count": 624,
+ "execution_count": 651,
"outputs": [
{
"output_type": "stream",
@@ -1207,9 +1207,9 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "9c40b5e4-3828-4d65-895d-641e5024aff7"
+ "outputId": "5237736c-ec9a-4381-c5d5-14362637bf43"
},
- "execution_count": 625,
+ "execution_count": 652,
"outputs": [
{
"output_type": "execute_result",
@@ -1219,7 +1219,7 @@
]
},
"metadata": {},
- "execution_count": 625
+ "execution_count": 652
}
]
},
@@ -1242,7 +1242,7 @@
"metadata": {
"id": "aAhkyvJlWK1p"
},
- "execution_count": 626,
+ "execution_count": 653,
"outputs": []
}
]
From 5bc4ae15468c59fd15bef8cfc718866b44f287da Mon Sep 17 00:00:00 2001
From: Andrea Ovalle <74880762+ovalle15@users.noreply.github.com>
Date: Wed, 25 Jan 2023 13:18:29 -0500
Subject: [PATCH 3/5] Update text
---
examples/prediction_upload/image_predictions.ipynb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/prediction_upload/image_predictions.ipynb b/examples/prediction_upload/image_predictions.ipynb
index 881b248aa..b4a82c7bc 100644
--- a/examples/prediction_upload/image_predictions.ipynb
+++ b/examples/prediction_upload/image_predictions.ipynb
@@ -765,7 +765,7 @@
"source": [
"## Step 5. Create the predictions payload\n",
"\n",
- "Create the annotations payload using the snippets of code in [Supported python annotation types and NDJSON](https://colab.research.google.com/drive/14ZdYPz8s58D07AOUjKuJ54rFjM0JVNlf#scrollTo=OePiibbed0nG&line=1&uniqifier=1).\n",
+ "Create the annotations payload using the snippets of code in ***Supported Predictions*** section.\n",
"\n",
"The resulting label_ndjson should have exactly the same content for annotations that are supported by both (with exception of the uuid strings that are generated)"
],
From 7e229b78d4d06009597c926ed1890342bd00b900 Mon Sep 17 00:00:00 2001
From: Andrea Ovalle <74880762+ovalle15@users.noreply.github.com>
Date: Thu, 26 Jan 2023 11:22:00 -0500
Subject: [PATCH 4/5] Removed unsupported nested python annotation
---
.../prediction_upload/image_predictions.ipynb | 126 ++++++++----------
1 file changed, 58 insertions(+), 68 deletions(-)
diff --git a/examples/prediction_upload/image_predictions.ipynb b/examples/prediction_upload/image_predictions.ipynb
index b4a82c7bc..5330b69b9 100644
--- a/examples/prediction_upload/image_predictions.ipynb
+++ b/examples/prediction_upload/image_predictions.ipynb
@@ -96,7 +96,7 @@
"metadata": {
"id": "cm8xMaLbGb7v"
},
- "execution_count": 627,
+ "execution_count": 36,
"outputs": []
},
{
@@ -119,7 +119,7 @@
"metadata": {
"id": "NIq-6M9kHKSs"
},
- "execution_count": 628,
+ "execution_count": 37,
"outputs": []
},
{
@@ -141,7 +141,7 @@
"metadata": {
"id": "z7ZLKLYLHP__"
},
- "execution_count": 629,
+ "execution_count": 38,
"outputs": []
},
{
@@ -173,32 +173,17 @@
"metadata": {
"id": "v5wL6oojz9Ge"
},
- "execution_count": 630,
+ "execution_count": 39,
"outputs": []
},
{
"cell_type": "code",
"source": [
- "########## Nested Classifications ##########\n",
+ "########## Nested Classifications are only supported with NDJSON tools ##########\n",
"\n",
- "\n",
- "### Radio #### \n",
- "# Python annotation \n",
- "nested_radio_prediction = ClassificationAnnotation(\n",
- " name=\"nested_radio_question\", \n",
- " value=Radio(answer = ClassificationAnswer(name = \"first_radio_answer\" , confidence=0.5)),\n",
- " classifications=[\n",
- " \tClassificationAnnotation(\n",
- " \tname=\"sub_radio_question\",\n",
- " \t\tvalue=Radio(answer=ClassificationAnswer(name=\"first_sub_radio_answer\", confidence=0.5))\n",
- " )\n",
- " ]\n",
- ")\n",
- "\n",
- "\n",
- "# NDJSON \n",
"nested_radio_prediction_ndjson = {\n",
" \"name\": \"nested_radio_question\",\n",
+ " \"confidence\": 0.5 ,\n",
" \"answer\": { \"name\": \"first_radio_answer\", \"confidence\": 0.5 },\n",
" \"classifications\" : [\n",
" {\n",
@@ -208,11 +193,10 @@
" ]\n",
"}\n",
"\n",
- "### Checklist #### \n",
- "## Only supported with NDJSON tools\n",
"\n",
"nested_checklist_prediction_ndjson = {\n",
" \"name\": \"nested_checklist_question\",\n",
+ " \"confidence\": 0.5 ,\n",
" \"answer\": [{\n",
" \"name\": \"first_checklist_answer\", \n",
" \"confidence\": 0.5,\n",
@@ -229,7 +213,7 @@
"metadata": {
"id": "I75K-wx7_sDs"
},
- "execution_count": 631,
+ "execution_count": 40,
"outputs": []
},
{
@@ -266,7 +250,7 @@
"metadata": {
"id": "b2UjSoYez9I1"
},
- "execution_count": 632,
+ "execution_count": 41,
"outputs": []
},
{
@@ -301,7 +285,7 @@
"metadata": {
"id": "xCU4JRP0z9Nh"
},
- "execution_count": 633,
+ "execution_count": 42,
"outputs": []
},
{
@@ -346,7 +330,7 @@
"metadata": {
"id": "gAIzsxEjLmhv"
},
- "execution_count": 634,
+ "execution_count": 43,
"outputs": []
},
{
@@ -394,7 +378,7 @@
"metadata": {
"id": "jRwfE4MFz9Ph"
},
- "execution_count": 635,
+ "execution_count": 44,
"outputs": []
},
{
@@ -417,7 +401,7 @@
"metadata": {
"id": "PBB37YpWTiVR"
},
- "execution_count": 636,
+ "execution_count": 45,
"outputs": []
},
{
@@ -461,7 +445,7 @@
"metadata": {
"id": "39vz-tYsz9Ry"
},
- "execution_count": 637,
+ "execution_count": 46,
"outputs": []
},
{
@@ -488,7 +472,7 @@
"metadata": {
"id": "UelSiWN2z9Tg"
},
- "execution_count": 638,
+ "execution_count": 47,
"outputs": []
},
{
@@ -544,7 +528,7 @@
"metadata": {
"id": "mrjb8qY3z9VY"
},
- "execution_count": 639,
+ "execution_count": 48,
"outputs": []
},
{
@@ -573,24 +557,24 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "d7a3afdd-f2e7-4602-a0c7-0c6cbe2938f2"
+ "outputId": "800b120e-c41e-4f6f-a509-8d089f3e20bc"
},
- "execution_count": 640,
+ "execution_count": 49,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\n"
]
}
@@ -698,7 +682,7 @@
"metadata": {
"id": "Kt4XWWqgIiWk"
},
- "execution_count": 641,
+ "execution_count": 50,
"outputs": []
},
{
@@ -722,7 +706,7 @@
"metadata": {
"id": "8n-AvzdiOR6d"
},
- "execution_count": 642,
+ "execution_count": 51,
"outputs": []
},
{
@@ -744,9 +728,9 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "88d69ffb-77f0-4dde-abcb-1addd1ae5ab5"
+ "outputId": "4eb2e619-1139-49c5-a9e7-d29838336c05"
},
- "execution_count": 643,
+ "execution_count": 52,
"outputs": [
{
"output_type": "execute_result",
@@ -756,7 +740,7 @@
]
},
"metadata": {},
- "execution_count": 643
+ "execution_count": 52
}
]
},
@@ -765,9 +749,9 @@
"source": [
"## Step 5. Create the predictions payload\n",
"\n",
- "Create the annotations payload using the snippets of code in ***Supported Predictions*** section.\n",
+ "Create the prediction payload using the snippets of code in the ***Supported Predictions*** section.\n",
"\n",
- "The resulting label_ndjson should have exactly the same content for annotations that are supported by both (with exception of the uuid strings that are generated)"
+ "The resulting label_ndjson should have exactly the same content for predictions that are supported by both (with the exception of the uuid strings that are generated)"
],
"metadata": {
"id": "6FZyvnrqSGuc"
@@ -787,7 +771,7 @@
" polygon_prediction, \n",
" mask_prediction, \n",
" point_prediction,\n",
- " nested_radio_prediction\n",
+ " text_annotation\n",
" ]\n",
")\n",
"\n",
@@ -800,7 +784,7 @@
"metadata": {
"id": "zv2OLTXKSGWv"
},
- "execution_count": 644,
+ "execution_count": 53,
"outputs": []
},
{
@@ -830,6 +814,7 @@
" nested_radio_prediction_ndjson,\n",
" nested_checklist_prediction_ndjson\n",
" \n",
+ " \n",
"]:\n",
" annot.update({\n",
" 'uuid': str(uuid.uuid4()),\n",
@@ -840,7 +825,7 @@
"metadata": {
"id": "F-Y7sSyAV3tn"
},
- "execution_count": 645,
+ "execution_count": 60,
"outputs": []
},
{
@@ -860,7 +845,7 @@
" name=\"prediction_upload_job\"+str(uuid.uuid4()),\n",
" predictions=ndjson_prediction_method2)\n",
"\n",
- "# Errors will appear for annotation uploads that failed.\n",
+ "# Errors will appear for prediction uploads that failed.\n",
"print(\"Errors:\", upload_job_prediction.errors)"
],
"metadata": {
@@ -868,9 +853,9 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "8eccb11b-fb9e-41d7-a823-7b70b842ffef"
+ "outputId": "c6ab8349-c327-49df-e3f6-00e06370c7e3"
},
- "execution_count": 646,
+ "execution_count": 61,
"outputs": [
{
"output_type": "stream",
@@ -916,7 +901,7 @@
"metadata": {
"id": "jEtoDiDrPFvI"
},
- "execution_count": 647,
+ "execution_count": 62,
"outputs": []
},
{
@@ -942,19 +927,19 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "bd2f36fa-6b97-4b08-b4ce-fe48802fcdf3"
+ "outputId": "262e1be8-3e43-42dd-ac99-e47378f9a705"
},
- "execution_count": 648,
+ "execution_count": 63,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
- ""
+ ""
]
},
"metadata": {},
- "execution_count": 648
+ "execution_count": 63
}
]
},
@@ -1088,7 +1073,7 @@
" ]\n",
"}\n",
"\n",
- "nested_checklist_prediction_ndjson = {\n",
+ "nested_checklist_annotation_ndjson = {\n",
" \"name\": \"nested_checklist_question\",\n",
" \"answer\": [{\n",
" \"name\": \"first_checklist_answer\", \n",
@@ -1099,12 +1084,17 @@
" } \n",
" ] \n",
" }]\n",
+ "}\n",
+ "\n",
+ "text_annotation_ndjson = {\n",
+ " 'name': 'free_text',\n",
+ " 'answer': 'sample text',\n",
"}\n"
],
"metadata": {
"id": "A8_HVvu9Uvfl"
},
- "execution_count": 649,
+ "execution_count": 64,
"outputs": []
},
{
@@ -1119,7 +1109,7 @@
{
"cell_type": "code",
"source": [
- "# Create a Label object by identifying the applicavle data row in Labelbox and providing a list of annotations\n",
+ "# Create a Label object by identifying the applicable data row in Labelbox and providing a list of annotations\n",
"ndjson_annotation = []\n",
"for annot in [\n",
" radio_annotation_ndjson, \n",
@@ -1131,7 +1121,8 @@
" point_annotation_ndjson, \n",
" polyline_annotation_ndjson,\n",
" nested_radio_annotation_ndjson,\n",
- " nested_checklist_prediction_ndjson\n",
+ " nested_checklist_annotation_ndjson,\n",
+ " text_annotation_ndjson\n",
"]:\n",
" annot.update({\n",
" 'uuid': str(uuid.uuid4()),\n",
@@ -1143,7 +1134,7 @@
"metadata": {
"id": "9gD_alThQA3G"
},
- "execution_count": 650,
+ "execution_count": 65,
"outputs": []
},
{
@@ -1173,9 +1164,9 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "88d42583-2892-490f-8497-3016bdeacf44"
+ "outputId": "eba0209a-bcde-4816-b386-d69f97899678"
},
- "execution_count": 651,
+ "execution_count": 66,
"outputs": [
{
"output_type": "stream",
@@ -1207,9 +1198,9 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "5237736c-ec9a-4381-c5d5-14362637bf43"
+ "outputId": "b2d68ab6-6d1f-4ce2-d633-8048e8209af3"
},
- "execution_count": 652,
+ "execution_count": 67,
"outputs": [
{
"output_type": "execute_result",
@@ -1219,7 +1210,7 @@
]
},
"metadata": {},
- "execution_count": 652
+ "execution_count": 67
}
]
},
@@ -1235,14 +1226,13 @@
{
"cell_type": "code",
"source": [
- "#upload_job\n",
"# project.delete()\n",
"# dataset.delete()"
],
"metadata": {
"id": "aAhkyvJlWK1p"
},
- "execution_count": 653,
+ "execution_count": 68,
"outputs": []
}
]
From cad468bcfe46a992528bb190d727f9d510ae4b60 Mon Sep 17 00:00:00 2001
From: Andrea Ovalle <74880762+ovalle15@users.noreply.github.com>
Date: Mon, 30 Jan 2023 15:23:18 -0500
Subject: [PATCH 5/5] Update image asset link
---
examples/prediction_upload/image_predictions.ipynb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/prediction_upload/image_predictions.ipynb b/examples/prediction_upload/image_predictions.ipynb
index 5330b69b9..25c1beba9 100644
--- a/examples/prediction_upload/image_predictions.ipynb
+++ b/examples/prediction_upload/image_predictions.ipynb
@@ -545,7 +545,7 @@
"source": [
"# send a sample image as batch to the project\n",
"test_img_url = {\n",
- " \"row_data\": \"https://raw.githubusercontent.com/Labelbox/labelbox-python/develop/examples/assets/2560px-Kitano_Street_Kobe01s5s4110.jpg\",\n",
+ " \"row_data\": \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n",
" \"global_key\": str(uuid.uuid4())\n",
"}\n",
"dataset = client.create_dataset(name=\"image_prediction_demo\")\n",