From a5645d09c07e1f3a53cd64ee028af40c486ecef4 Mon Sep 17 00:00:00 2001 From: Andrea Ovalle <74880762+ovalle15@users.noreply.github.com> Date: Tue, 20 Dec 2022 11:19:29 -0500 Subject: [PATCH 1/5] New annotation import format Created examples with all the annotation types supported for videos Created all examples with new NDJSON format annotations --- examples/annotation_import/video.ipynb | 1141 +++++++++++++++++------- 1 file changed, 810 insertions(+), 331 deletions(-) diff --git a/examples/annotation_import/video.ipynb b/examples/annotation_import/video.ipynb index 80a79dc6e..bc41b0d67 100644 --- a/examples/annotation_import/video.ipynb +++ b/examples/annotation_import/video.ipynb @@ -1,346 +1,825 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "db768cda", - "metadata": {}, - "source": [ - "\n", - " \n", - "" - ] - }, - { - "cell_type": "markdown", - "id": "cb5611d0", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "id": "stupid-court", - "metadata": {}, - "source": [ - "# Video MAL" - ] - }, - { - "cell_type": "markdown", - "id": "intellectual-idaho", - "metadata": {}, - "source": [ - "* Upload model inferences for video tasks\n", - "* Support types\n", - " * bounding box" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "voluntary-minister", - "metadata": {}, - "outputs": [], - "source": [ - "!pip install -q labelbox" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "committed-richards", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import uuid\n", - "from io import BytesIO\n", - "from typing import Dict, Any, Tuple\n", - "\n", - "from labelbox import Client, LabelingFrontend\n", - "from labelbox.schema.ontology import OntologyBuilder, Tool, Classification, Option\n", - "from labelbox.schema.media_type import MediaType" - ] - }, - { - "cell_type": "markdown", - "id": "c8c876b7", - "metadata": {}, - "source": [ - "# API Key and Client\n", - "Provide a valid api key below in order to properly connect to the Labelbox Client." 
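A side note on the API key setup that follows: rather than hard-coding the key in the notebook, it is common to read it from an environment variable. A minimal sketch — the `LABELBOX_API_KEY` variable name is an assumption here, not something this notebook defines:

```python
import os
from labelbox import Client

# Assumption: the key was exported beforehand, e.g.
#   export LABELBOX_API_KEY="<your key>"
API_KEY = os.environ.get("LABELBOX_API_KEY")
client = Client(api_key=API_KEY)
```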
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "affecting-myanmar", - "metadata": {}, - "outputs": [], - "source": [ - "# Add your api key\n", - "API_KEY = None\n", - "client = Client(api_key=API_KEY)" - ] - }, - { - "cell_type": "markdown", - "id": "blessed-venture", - "metadata": {}, - "source": [ - "### Project Setup" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "suburban-crowd", - "metadata": {}, - "outputs": [], - "source": [ - "# We want to try out a few different tools here.\n", - "ontology_builder = OntologyBuilder(\n", - " tools=[Tool(tool=Tool.Type.BBOX, name=\"jellyfish\")])" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "modern-program", - "metadata": {}, - "outputs": [], - "source": [ - "# Lets setup a project to label\n", - "# Note see Ontology, Project, and Project_setup notebooks for more information on this section.\n", - "project = client.create_project(name=\"video_mal_project\", media_type=MediaType.Video)\n", - "dataset = client.create_dataset(name=\"video_mal_dataset\")\n", - "dataset.create_data_row(\n", - " row_data=\n", - " \"https://storage.labelbox.com/cjhfn5y6s0pk507024nz1ocys%2Fb8837f3b-b071-98d9-645e-2e2c0302393b-jellyfish2-100-110.mp4\"\n", - ")\n", - "editor = next(\n", - " client.get_labeling_frontends(where=LabelingFrontend.name == \"Editor\"))\n", - "project.setup(editor, ontology_builder.asdict())\n", - "project.datasets.connect(dataset)" - ] - }, - { - "cell_type": "markdown", - "id": "portable-grenada", - "metadata": {}, - "source": [ - "#### Grab featureSchemaIds" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "abstract-fifteen", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'jellyfish': 'cky3dt2lja37d0z9t26wf3qo5'}\n" - ] - } - ], - "source": [ - "# When we created a project with the ontology defined above, all of the ids were assigned.\n", - "# So lets reconstruct the ontology builder with all of the ids.\n", - "ontology = ontology_builder.from_project(project)\n", - "# We want all of the feature schemas to be easily accessible by name.\n", - "schema_lookup = {tool.name: tool.feature_schema_id for tool in ontology.tools}\n", - "print(schema_lookup)" - ] - }, - { - "cell_type": "markdown", - "id": "portuguese-arthur", - "metadata": {}, - "source": [ - "## Import Format\n", - "\n", - "* [Documentation](https://docs.labelbox.com/docs/bounding-box-json)\n", - "\n", - "\n", - "```\n", - "Each row of the import is a unique instance\n", - "\n", - "schemaId: \n", - "dataRow:\n", - " id: \n", - "Instance:\n", - " [Segments]:\n", - " [KeyFrames]:\n", - " frame:\n", - " bbox:\n", - " top:\n", - " bottom:\n", - " height:\n", - " width:\n", - "```\n", - "\n", - "**segments**: A segment represents a continuous section where an object is visible. If an instance disappears then the segment ends. If it re-appears, a new segment is created.\n", - "\n", - "**keyframes**: Key frames identify the location of an instance. 
Between keyframes, the location of the instance is interpolated.\n", - "\n", - "**bbox**: The coordinates of the bounding box" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "5fc417c5", - "metadata": {}, - "outputs": [], - "source": [ - "segments = [{\n", - " \"keyframes\": [{\n", - " \"frame\": 1,\n", - " \"bbox\": {\n", - " \"top\": 80,\n", - " \"left\": 80,\n", - " \"height\": 80,\n", - " \"width\": 80\n", - " }\n", - " }, {\n", - " \"frame\": 20,\n", - " \"bbox\": {\n", - " \"top\": 125,\n", - " \"left\": 125,\n", - " \"height\": 200,\n", - " \"width\": 300\n", - " }\n", - " }]\n", - "}, {\n", - " \"keyframes\": [{\n", - " \"frame\": 27,\n", - " \"bbox\": {\n", - " \"top\": 80,\n", - " \"left\": 50,\n", - " \"height\": 80,\n", - " \"width\": 50\n", - " }\n", - " }]\n", - "}]" - ] - }, - { - "cell_type": "markdown", - "id": "convertible-entry", - "metadata": {}, - "source": [ - "##### Create helper functions to make this much easier" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "developing-beauty", - "metadata": {}, - "outputs": [], - "source": [ - "def create_video_bbox_ndjson(datarow_id: str, schema_id: str,\n", - " segments: Dict[str, Any]) -> Dict[str, Any]:\n", - " return {\n", - " \"uuid\": str(uuid.uuid4()),\n", - " \"schemaId\": schema_id,\n", - " \"dataRow\": {\n", - " \"id\": datarow_id\n", - " },\n", - " \"segments\": segments\n", - " }" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "db768cda" + }, + "source": [ + "\n", + " \n", + "" + ], + "id": "db768cda" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cb5611d0" + }, + "source": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + " " + ], + "id": "cb5611d0" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "stupid-court" + }, + "source": [ + "# Video MAL\n", + "\n", + "* Annotations must be created and uploaded using NDJSON\n", + "* Supported annotations that can be uploaded through the SDK:\n", + " * Bounding box\n", + " * Point\n", + " * Polyline \n", + " * Radio classifications \n", + " * Checklist classifications \n", + "* **NOT** supported:\n", + " * Polygons \n", + " * Segmentation masks\n", + " * Free form text classifications\n", + " * Any nested classifications (under either a tool or classification) \n", + "\n", + "Please note that this list of unsupported annotations only refers to limitations for importing annotations. For example, when using the Labelbox editor, segmentation masks can be created and edited on video assets." 
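One concept worth pausing on before the examples: segments and keyframes. As the import-format notes above explain, a segment is a continuous section where an object is visible, keyframes pin the object's location, and the location between keyframes is interpolated. As a toy illustration only — assuming simple linear interpolation, which is not necessarily what the Labelbox editor does internally — that filling-in could look like:

```python
# Toy sketch of keyframe interpolation (assumes linear interpolation).
def lerp_bbox(kf_a, kf_b, frame):
    # Fraction of the way from keyframe A to keyframe B at this frame.
    t = (frame - kf_a["frame"]) / (kf_b["frame"] - kf_a["frame"])
    return {
        key: kf_a["bbox"][key] + t * (kf_b["bbox"][key] - kf_a["bbox"][key])
        for key in ("top", "left", "height", "width")
    }

# Keyframe values taken from the segments example above.
kf_a = {"frame": 1, "bbox": {"top": 80, "left": 80, "height": 80, "width": 80}}
kf_b = {"frame": 20, "bbox": {"top": 125, "left": 125, "height": 200, "width": 300}}
print(lerp_bbox(kf_a, kf_b, 10))  # an in-between frame
```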
+ ], + "id": "stupid-court" + }, + { + "cell_type": "markdown", + "source": [ + "### Setup" + ], + "metadata": { + "id": "1zT_5ECvN_cD" + }, + "id": "1zT_5ECvN_cD" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "voluntary-minister" + }, + "outputs": [], + "source": [ + "!pip install -q 'labelbox[data]'" + ], + "id": "voluntary-minister" + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "committed-richards" + }, + "outputs": [], + "source": [ + "import uuid\n", + "from labelbox import Client, LabelingFrontend, MediaType, MALPredictionImport, LabelImport\n", + "from labelbox.schema.ontology import OntologyBuilder, Tool, Classification, Option\n", + "from labelbox.schema.queue_mode import QueueMode\n", + "from labelbox.data.annotation_types import (\n", + " Label, ObjectAnnotation,\n", + " Rectangle, Point, Line, Radio, Checklist, ClassificationAnnotation, ClassificationAnswer\n", + ")" + ], + "id": "committed-richards" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c8c876b7" + }, + "source": [ + "### Replace with your API key \n", + "Guides on [Create an API key](https://docs.labelbox.com/docs/create-an-api-key)" + ], + "id": "c8c876b7" + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "affecting-myanmar" + }, + "outputs": [], + "source": [ + "# Add your api key\n", + "API_KEY = None\n", + "client = Client(api_key=API_KEY)" + ], + "id": "affecting-myanmar" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "blessed-venture" + }, + "source": [ + "## Support annotations for video\n", + "Only NDJSON annotations are supported with video assets" + ], + "id": "blessed-venture" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kLT9P-WYk4Nr" + }, + "source": [ + "### Supported NDJSON annotations" + ], + "id": "kLT9P-WYk4Nr" + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "suburban-crowd" + }, + "outputs": [], + "source": [ + "######## Bounding box ###########\n", + "\n", + "# NDJSON\n", + "bbox_annotation_ndjson = {\n", + " \"name\" : \"bbox_video\",\n", + " \"segments\" : [{\n", + " \"keyframes\" : [\n", + " {\n", + " \"frame\": 13,\n", + " \"bbox\" : {\n", + " \"top\": 146.0,\n", + " \"left\": 98.0,\n", + " \"height\": 382.0,\n", + " \"width\": 341.0\n", + " } \n", + " },\n", + " {\n", + " \"frame\": 14,\n", + " \"bbox\" : {\n", + " \"top\": 146.0,\n", + " \"left\": 98.0,\n", + " \"height\": 382.0,\n", + " \"width\": 341.0\n", + " } \n", + " },\n", + " {\n", + " \"frame\": 15,\n", + " \"bbox\" : {\n", + " \"top\": 146.0,\n", + " \"left\": 98.0,\n", + " \"height\": 382.0,\n", + " \"width\": 341.0\n", + " } \n", + " }\n", + " ]\n", + " }\n", + " ]\n", + "}" + ], + "id": "suburban-crowd" + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "id": "NCkQ1pB6zBne" + }, + "outputs": [], + "source": [ + "######## Point ########\n", + "\n", + "#NDJSON\n", + "point_annotation_ndjson = {\n", + " \"name\": \"point_video\", \n", + " \"segments\": [{\n", + " \"keyframes\": [{\n", + " \"frame\": 17,\n", + " \"point\" : {\n", + " \"x\": 660.134 ,\n", + " \"y\": 407.926\n", + " }\n", + " }]\n", + " }] \n", + "}" + ], + "id": "NCkQ1pB6zBne" + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "id": "8xF7QbwZ41Q5" + }, + "outputs": [], + "source": [ + "######## Polyline ########\n", + "\n", + "# NDJSON (frame based annotations are supported with NDJSON format)\n", + "polyline_frame_annotation_ndjson = {\n", + " 
\"name\": \"line_video_frame\", \n", + " \"segments\": [\n", + " {\n", + " \"keyframes\": [\n", + " {\n", + " \"frame\": 5,\n", + " \"line\": [{\n", + " \"x\": 680,\n", + " \"y\": 100\n", + " },{\n", + " \"x\": 100,\n", + " \"y\": 190\n", + " },{\n", + " \"x\": 190,\n", + " \"y\": 220\n", + " }]\n", + " },\n", + " {\n", + " \"frame\": 12,\n", + " \"line\": [{\n", + " \"x\": 680,\n", + " \"y\": 280\n", + " },{\n", + " \"x\": 300,\n", + " \"y\": 380\n", + " },{\n", + " \"x\": 400,\n", + " \"y\": 460\n", + " }]\n", + " },\n", + " {\n", + " \"frame\": 20,\n", + " \"line\": [{\n", + " \"x\": 680,\n", + " \"y\": 180\n", + " },{\n", + " \"x\": 100,\n", + " \"y\": 200\n", + " },{\n", + " \"x\": 200,\n", + " \"y\": 260\n", + " }]\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " \"keyframes\": [\n", + " {\n", + " \"frame\": 24,\n", + " \"line\": [{\n", + " \"x\": 300,\n", + " \"y\": 310\n", + " },{\n", + " \"x\": 330,\n", + " \"y\": 430\n", + " }]\n", + " },\n", + " {\n", + " \"frame\": 45,\n", + " \"line\": [{\n", + " \"x\": 600,\n", + " \"y\": 810\n", + " },{\n", + " \"x\": 900,\n", + " \"y\": 930\n", + " }]\n", + " }\n", + " ]\n", + " }\n", + " ]\n", + "}" + ], + "id": "8xF7QbwZ41Q5" + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "id": "hm-zJTtE_APt" + }, + "outputs": [], + "source": [ + "######## Checklist classification ########\n", + "\n", + "## NDJSON\n", + "\n", + "## frame specific\n", + "frame_checklist_classification_ndjson = {\n", + " \"name\": \"checklist_class\", \n", + " \"answer\": [\n", + " { \"name\": \"first_checklist_answer\" , \"frames\": [{\"start\": 29, \"end\": 35 }, {\"start\": 48, \"end\": 65}]},\n", + " { \"name\": \"second_checklist_answer\", \"frames\": [{\"start\": 29, \"end\": 35 }, {\"start\": 48, \"end\": 65}]} \n", + " ] \n", + "}\n", + "\n", + "# Global \n", + "global_radio_classification_ndjson = {\n", + " \"name\": \"radio_class_global\", \n", + " \"answer\": { \"name\": \"first_radio_answer\" }\n", + "}\n", + "\n" + ], + "id": "hm-zJTtE_APt" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zpj79CySHpbW" + }, + "source": [ + "## Upload Annotations - putting it all together" + ], + "id": "Zpj79CySHpbW" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "t-5DTORrHtbo" + }, + "source": [ + "### Step 1: Import data rows into Catalog" + ], + "id": "t-5DTORrHtbo" + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "ysLRIWM3HGFv", + "colab": { + "base_uri": "https://localhost:8080/" }, + "outputId": "5f6d80b2-be9a-4462-a0d4-06981b811f7e" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": 11, - "id": "asian-savings", - "metadata": {}, - "outputs": [], - "source": [ - "uploads = []\n", - "\n", - "for data_row in dataset.data_rows():\n", - " uploads.append(\n", - " create_video_bbox_ndjson(data_row.uid, schema_lookup['jellyfish'],\n", - " segments))" - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "clbweokzu0hup07znaag28k3r\n", + "\n" + ] + } + ], + "source": [ + "from labelbox.data.annotation_types.collection import uuid4\n", + "client = Client(API_KEY)\n", + "\n", + "asset = {\n", + " \"row_data\": \"https://storage.googleapis.com/labelbox-datasets/video-sample-data/sample-video-2.mp4\", \n", + " \"global_key\": str(uuid.uuid4()),\n", + " \"media_type\": \"VIDEO\"\n", + "}\n", + "\n", + "dataset = client.create_dataset(name=\"video_demo_dataset\")\n", + "data_row = dataset.create_data_row(asset)\n", + "print(data_row.uid)\n", + "print(data_row)" + ], + "id": 
"ysLRIWM3HGFv" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AXopoqTqKOvW" + }, + "source": [ + "### Step 2: Create/select an ontology\n", + "Your project should have the correct ontology setup with all the tools and classifications supported for your annotations, and the tool names and classification instructions should match the `name`/`instructions` fields in your annotations to ensure the correct feature schemas are matched.\n", + "\n", + "For example, when we create the bounding box annotation [above](https://colab.research.google.com/drive/1S3HBa10jwfFFNB71Uid-crUDdhfEGnXh#scrollTo=suburban-crowd&line=10&uniqifier=1), we provided the `name` as `bbox_video`. Now, when we setup our ontology, we must ensure that the name of my bounding box tool is also `bbox_video`. The same alignment must hold true for the other tools and classifications we create in our ontology.\n", + "\n", + "\n", + "[Documentation for reference ](https://docs.labelbox.com/reference/import-text-annotations)" + ], + "id": "AXopoqTqKOvW" + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "id": "ZCjNJBTSKONt" + }, + "outputs": [], + "source": [ + "ontology_builder = OntologyBuilder(\n", + " tools=[\n", + " Tool(tool=Tool.Type.BBOX, name=\"bbox_video\"),\n", + " Tool(tool=Tool.Type.POINT, name=\"point_video\"),\n", + " Tool(tool=Tool.Type.LINE, name=\"line_video_frame\")\n", + " ],\n", + " classifications=[ \n", + " Classification(\n", + " class_type=Classification.Type.CHECKLIST, \n", + " instructions=\"checklist_class\",\n", + " scope = Classification.Scope.INDEX, ## Need to defined scope for frame classifications \n", + " options=[ \n", + " Option(value=\"first_checklist_answer\"),\n", + " Option(value=\"second_checklist_answer\")\n", + " ]\n", + " ),\n", + " Classification(\n", + " class_type=Classification.Type.RADIO, \n", + " instructions=\"radio_class_global\",\n", + " options=[ \n", + " Option(value=\"first_radio_answer\"),\n", + " Option(value=\"second_radio_answer\")\n", + " ]\n", + " ) \n", + " ] \n", + ")\n", + "\n", + "ontology = client.create_ontology(\"Ontology Video Annotations\", ontology_builder.asdict())" + ], + "id": "ZCjNJBTSKONt" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "portable-grenada" + }, + "source": [ + "### Step 3: Create a labeling project \n", + "Connect the ontology to the labeling project." 
+ ], + "id": "portable-grenada" + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "id": "wPLSHXrQPfHh" + }, + "outputs": [], + "source": [ + "# Project defaults to batch mode with benchmark quality settings if this argument is not provided\n", + "# Queue mode will be deprecated once dataset mode is deprecated\n", + "\n", + "project = client.create_project(name=\"video_project_demo\",\n", + " queue_mode=QueueMode.Batch,\n", + " media_type=MediaType.Video)\n", + "\n", + "## connect ontology to your project\n", + "project.setup_editor(ontology)\n", + "\n", + "######################### DATASET CONSENSUS OPTION ########################\n", + "# Note that dataset base projects will be deprecated in the near future.\n", + "\n", + "# To use Datasets/Consensus instead of Batches/Benchmarks use the following query: \n", + "# In this case, 10% of all data rows need to be annotated by three labelers.\n", + "\n", + "# dataset_project = client.create_project(name=\"dataset-test-project\",\n", + "# description=\"a description\",\n", + "# media_type=MediaType.Text,\n", + "# auto_audit_percentage=0.1,\n", + "# auto_audit_number_of_labels=3,\n", + "# queue_mode=QueueMode.Dataset)\n", + "\n", + "# dataset_project.datasets.connect(dataset)" + ], + "id": "wPLSHXrQPfHh" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GVVPuZVSX6wi" + }, + "source": [ + "### Step 4: Send a batch of data rows to the project" + ], + "id": "GVVPuZVSX6wi" + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "id": "UhqBD3gAYCAg", + "colab": { + "base_uri": "https://localhost:8080/" }, + "outputId": "eb929f63-44af-40d9-e98e-72eb14a782ab" + }, + "outputs": [ { - "cell_type": "markdown", - "id": "perfect-seafood", - "metadata": {}, - "source": [ - "### Upload the annotations" - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "Batch: \n" + ] + } + ], + "source": [ + "# Create batches\n", + "\n", + "# Create a batch to send to your MAL project\n", + "batch = project.create_batch(\n", + " \"first-batch-video-demo2\", # Each batch in a project must have a unique name\n", + " dataset.export_data_rows(), # A paginated collection of data row objects\n", + " 5 # priority between 1(Highest) - 5(lowest)\n", + ")\n", + "\n", + "print(\"Batch: \", batch)" + ], + "id": "UhqBD3gAYCAg" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gKVEw7AMYIc1" + }, + "source": [ + "### Step 5: Create the annotations payload \n", + "Create the annotations payload using the snippets of code above.\n", + "\n", + "Labelbox supports two formats for the annotations payload: NDJSON and Python Annotation types. However, for video assets, only NDJSON format is supported." + ], + "id": "gKVEw7AMYIc1" + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ew0UVWtGZzTO" + }, + "source": [ + "#### NDJSON annotations\n", + "Here we create the complete `label_ndjson` payload of annotations. There is one annotation for each *reference to an annotation* that we created [above](https://colab.research.google.com/drive/1S3HBa10jwfFFNB71Uid-crUDdhfEGnXh#scrollTo=kLT9P-WYk4Nr&line=1&uniqifier=1)." 
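As an aside, NDJSON simply means newline-delimited JSON: one JSON object per line. The SDK accepts the Python list of dicts directly, but if a standalone `.ndjson` file were ever needed (for example, to inspect the payload on disk), it could be written with nothing but the standard library — a sketch, assuming the `label_ndjson` list built in the next cell:

```python
import json

# Serialize the payload as newline-delimited JSON: one annotation per line.
with open("video_annotations.ndjson", "w") as f:
    for annotation in label_ndjson:
        f.write(json.dumps(annotation) + "\n")
```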
+ ], + "id": "ew0UVWtGZzTO" + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "id": "qnFTqR6eZ3mE" + }, + "outputs": [], + "source": [ + "label_ndjson = []\n", + "\n", + "for annotations in [point_annotation_ndjson,\n", + " bbox_annotation_ndjson,\n", + " polyline_frame_annotation_ndjson, \n", + " frame_checklist_classification_ndjson, \n", + " global_radio_classification_ndjson\n", + " ]: \n", + " annotations.update({\n", + " 'uuid' : str(uuid.uuid4()),\n", + " 'dataRow': {\n", + " 'id': next(dataset.export_data_rows()).uid\n", + " }\n", + " })\n", + " label_ndjson.append(annotations)\n" + ], + "id": "qnFTqR6eZ3mE" + }, + { + "cell_type": "code", + "source": [ + " label_ndjson" + ], + "metadata": { + "id": "YlQUVJ17Ow0-", + "colab": { + "base_uri": "https://localhost:8080/" }, + "outputId": "d46ec73e-9e6a-4264-8e4a-56ae0e5f3edb" + }, + "id": "YlQUVJ17Ow0-", + "execution_count": 14, + "outputs": [ { - "cell_type": "code", - "execution_count": 12, - "id": "entire-community", - "metadata": {}, - "outputs": [], - "source": [ - "# Let's upload!\n", - "# Validate must be set to false for video bounding boxes\n", - "upload_task = project.upload_annotations(name=f\"upload-job-{uuid.uuid4()}\",\n", - " annotations=uploads,\n", - " validate=False)" + "output_type": "execute_result", + "data": { + "text/plain": [ + "[{'name': 'point_video',\n", + " 'segments': [{'keyframes': [{'frame': 17,\n", + " 'point': {'x': 660.134, 'y': 407.926}}]}],\n", + " 'uuid': '75cb8bb5-b844-4b16-97b9-be6c0e92bc85',\n", + " 'dataRow': {'id': 'clbweokzu0hup07znaag28k3r'}},\n", + " {'name': 'bbox_video',\n", + " 'segments': [{'keyframes': [{'frame': 13,\n", + " 'bbox': {'top': 146.0, 'left': 98.0, 'height': 382.0, 'width': 341.0}},\n", + " {'frame': 14,\n", + " 'bbox': {'top': 146.0, 'left': 98.0, 'height': 382.0, 'width': 341.0}},\n", + " {'frame': 15,\n", + " 'bbox': {'top': 146.0,\n", + " 'left': 98.0,\n", + " 'height': 382.0,\n", + " 'width': 341.0}}]}],\n", + " 'uuid': '36fb5752-177a-46bc-9531-240eb402877d',\n", + " 'dataRow': {'id': 'clbweokzu0hup07znaag28k3r'}},\n", + " {'name': 'line_video_frame',\n", + " 'segments': [{'keyframes': [{'frame': 5,\n", + " 'line': [{'x': 680, 'y': 100},\n", + " {'x': 100, 'y': 190},\n", + " {'x': 190, 'y': 220}]},\n", + " {'frame': 12,\n", + " 'line': [{'x': 680, 'y': 280},\n", + " {'x': 300, 'y': 380},\n", + " {'x': 400, 'y': 460}]},\n", + " {'frame': 20,\n", + " 'line': [{'x': 680, 'y': 180},\n", + " {'x': 100, 'y': 200},\n", + " {'x': 200, 'y': 260}]}]},\n", + " {'keyframes': [{'frame': 24,\n", + " 'line': [{'x': 300, 'y': 310}, {'x': 330, 'y': 430}]},\n", + " {'frame': 45, 'line': [{'x': 600, 'y': 810}, {'x': 900, 'y': 930}]}]}],\n", + " 'uuid': 'a51507b2-7781-4d1a-aa50-b6f7e45be7b9',\n", + " 'dataRow': {'id': 'clbweokzu0hup07znaag28k3r'}},\n", + " {'name': 'checklist_class',\n", + " 'answer': [{'name': 'first_checklist_answer',\n", + " 'frames': [{'start': 29, 'end': 35}, {'start': 48, 'end': 65}]},\n", + " {'name': 'second_checklist_answer',\n", + " 'frames': [{'start': 29, 'end': 35}, {'start': 48, 'end': 65}]}],\n", + " 'uuid': 'b375757b-d881-4bd3-b948-c5c676d6bd62',\n", + " 'dataRow': {'id': 'clbweokzu0hup07znaag28k3r'}},\n", + " {'name': 'radio_class_global',\n", + " 'answer': {'name': 'first_radio_answer'},\n", + " 'uuid': 'a19ab316-dc97-4014-bbd7-aff0d28a2b13',\n", + " 'dataRow': {'id': 'clbweokzu0hup07znaag28k3r'}}]" ] + }, + "metadata": {}, + "execution_count": 14 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": 
"perfect-seafood" + }, + "source": [ + "### Step 6: Upload annotations to a project as pre-labels or completed labels\n", + "For the purpose of this tutorial only run one of the label imports at once, otherwise the previous import might get overwritten." + ], + "id": "perfect-seafood" + }, + { + "cell_type": "markdown", + "source": [ + "#### Model-Assisted Labeling (MAL)" + ], + "metadata": { + "id": "duR8GYczNCmy" + }, + "id": "duR8GYczNCmy" + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "id": "entire-community", + "colab": { + "base_uri": "https://localhost:8080/" }, + "outputId": "91d1175c-128c-4888-98e4-a815b7040642" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": 13, - "id": "hollywood-faculty", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[]\n" - ] - } - ], - "source": [ - "# Wait for upload to finish (Will take up to five minutes)\n", - "upload_task.wait_until_done()\n", - "# Review the upload status\n", - "print(upload_task.errors)" - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "Errors: []\n", + " \n" + ] } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" + ], + "source": [ + "# Upload MAL label for this data row in project\n", + "upload_job_mal = MALPredictionImport.create_from_objects(\n", + " client = client, \n", + " project_id = project.uid, \n", + " name=\"mal_import_job-\" + str(uuid.uuid4()), \n", + " predictions=label_ndjson)\n", + "\n", + "upload_job_mal.wait_until_done();\n", + "print(\"Errors:\", upload_job_mal.errors)\n", + "print(\" \")" + ], + "id": "entire-community" + }, + { + "cell_type": "markdown", + "source": [ + "#### Label Import" + ], + "metadata": { + "id": "OCilDEz_wxpb" + }, + "id": "OCilDEz_wxpb" + }, + { + "cell_type": "code", + "source": [ + "upload_job_label_import = LabelImport.create_from_objects(\n", + " client = client,\n", + " project_id = project.uid, \n", + " name = \"label_import_job-\" + str(uuid.uuid4()),\n", + " labels=label_ndjson\n", + ")\n", + "\n", + "upload_job_label_import.wait_until_done();\n", + "print(\"Errors:\", upload_job_label_import.errors)\n", + "print(\" \")" + ], + "metadata": { + "id": "si-6kQ4mwUBO", + "colab": { + "base_uri": "https://localhost:8080/" }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.8" + "outputId": "6f766cc3-28cb-4237-b3a6-12c196a461a7" + }, + "id": "si-6kQ4mwUBO", + "execution_count": 17, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Errors: []\n", + " \n" + ] } + ] + }, + { + "cell_type": "markdown", + "source": [ + "### Optional deletions for cleanup" + ], + "metadata": { + "id": "jdMmQxoVNP6q" + }, + "id": "jdMmQxoVNP6q" + }, + { + "cell_type": "code", + "source": [ + "# Delete Project\n", + "# project.delete()\n", + "# dataset.delete()" + ], + "metadata": { + "id": "yPZaLM74LFhB" + }, + "id": "yPZaLM74LFhB", + "execution_count": null, + "outputs": [] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" }, - "nbformat": 4, - "nbformat_minor": 5 + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + 
"name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 } From 261c4f32108db7175678a3e8f33f16aa08b1536d Mon Sep 17 00:00:00 2001 From: Andrea Ovalle <74880762+ovalle15@users.noreply.github.com> Date: Tue, 20 Dec 2022 11:28:55 -0500 Subject: [PATCH 2/5] Updated github / jupyter links Updated github + jupyter links to point to annotation import folder https://github.com/Labelbox/labelbox-python/tree/develop/examples/annotation_import/video.ipynb\ --- examples/annotation_import/video.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/annotation_import/video.ipynb b/examples/annotation_import/video.ipynb index bc41b0d67..d5f695a85 100644 --- a/examples/annotation_import/video.ipynb +++ b/examples/annotation_import/video.ipynb @@ -19,13 +19,13 @@ }, "source": [ "\n", - "\n", "\n", "\n", "\n", "\n", - "\n", " " ], From 25dc660238184d5f4dee63117ba186b7123fad2a Mon Sep 17 00:00:00 2001 From: Andrea Ovalle <74880762+ovalle15@users.noreply.github.com> Date: Wed, 4 Jan 2023 16:03:09 -0500 Subject: [PATCH 3/5] Added nested classification examples --- examples/annotation_import/video.ipynb | 362 +++++++++++++++++++------ 1 file changed, 277 insertions(+), 85 deletions(-) diff --git a/examples/annotation_import/video.ipynb b/examples/annotation_import/video.ipynb index d5f695a85..12e0fc630 100644 --- a/examples/annotation_import/video.ipynb +++ b/examples/annotation_import/video.ipynb @@ -37,7 +37,7 @@ "id": "stupid-court" }, "source": [ - "# Video MAL\n", + "# Video Annotation Import\n", "\n", "* Annotations must be created and uploaded using NDJSON\n", "* Supported annotations that can be uploaded through the SDK:\n", @@ -50,7 +50,7 @@ " * Polygons \n", " * Segmentation masks\n", " * Free form text classifications\n", - " * Any nested classifications (under either a tool or classification) \n", + " * tool nested classification\n", "\n", "Please note that this list of unsupported annotations only refers to limitations for importing annotations. For example, when using the Labelbox editor, segmentation masks can be created and edited on video assets." ], @@ -58,21 +58,36 @@ }, { "cell_type": "markdown", - "source": [ - "### Setup" - ], "metadata": { "id": "1zT_5ECvN_cD" }, + "source": [ + "### Setup" + ], "id": "1zT_5ECvN_cD" }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": { - "id": "voluntary-minister" + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "voluntary-minister", + "outputId": "e8d4945a-2268-40f0-d909-be1f198d7b09" }, - "outputs": [], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m185.5/185.5 KB\u001b[0m \u001b[31m8.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.8/7.8 MB\u001b[0m \u001b[31m73.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Building wheel for pygeotile (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n" + ] + } + ], "source": [ "!pip install -q 'labelbox[data]'" ], @@ -80,7 +95,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 5, "metadata": { "id": "committed-richards" }, @@ -110,7 +125,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 6, "metadata": { "id": "affecting-myanmar" }, @@ -128,7 +143,7 @@ "id": "blessed-venture" }, "source": [ - "## Support annotations for video\n", + "## Supported annotations for video\n", "Only NDJSON annotations are supported with video assets" ], "id": "blessed-venture" @@ -145,7 +160,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 7, "metadata": { "id": "suburban-crowd" }, @@ -194,7 +209,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 8, "metadata": { "id": "NCkQ1pB6zBne" }, @@ -220,7 +235,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 9, "metadata": { "id": "8xF7QbwZ41Q5" }, @@ -306,7 +321,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 10, "metadata": { "id": "hm-zJTtE_APt" }, @@ -334,6 +349,82 @@ ], "id": "hm-zJTtE_APt" }, + { + "cell_type": "code", + "source": [ + "########## Nested Global Classification ########### \n", + "\n", + "nested_classification = {\n", + " 'name': 'radio_question_nested',\n", + " 'answer': {'name': 'first_radio_question'},\n", + " 'classifications' : [\n", + " {'name': 'sub_question_radio', 'answer': {'name': 'sub_answer'}}\n", + " ]\n", + "}" + ], + "metadata": { + "id": "N5ibW4frr5rw" + }, + "id": "N5ibW4frr5rw", + "execution_count": 11, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "########## Frame Classifications ##########\n", + "frame_bbox_with_checklist_subclass_ndjson = {\n", + " \"name\": \"bbox_class\",\n", + " \"segments\": [{\n", + " \"keyframes\": [\n", + " {\n", + " \"frame\": 10,\n", + " \"bbox\": {\n", + " \"top\": 1366.0,\n", + " \"left\": 662.0,\n", + " \"height\": 1688.0,\n", + " \"width\": 876.0\n", + " },\n", + " \"classifications\" : [\n", + " {'schemaId' : '', 'answer' : {'schemaId': '' }}\n", + " ] \n", + " },\n", + " { \n", + " \"frame\": 11,\n", + " \"bbox\": {\n", + " \"top\": 1366.0,\n", + " \"left\": 662.0,\n", + " \"height\": 1688.0,\n", + " \"width\": 876.0\n", + " },\n", + " \"classifications\" : [\n", + " {'schemaId' : '', 'answer' : {'schemaId': '' }}\n", + " ] \n", + " },\n", + " { \n", + " \"frame\": 13,\n", + " \"bbox\": {\n", + " \"top\": 1366.0,\n", + " \"left\": 662.0,\n", + " \"height\": 1688.0,\n", + " \"width\": 876.0\n", + " },\n", + " \"classifications\" : [\n", + " {'schemaId' : '', 'answer' : {'schemaId': '' }}\n", + " ] \n", + " }\n", + " ]\n", + " }\n", + " ]\n", + "}" + ], + "metadata": { + "id": "TU6PLazxZdWl" + }, + "id": "TU6PLazxZdWl", + "execution_count": 12, + "outputs": [] + }, { "cell_type": "markdown", "metadata": { @@ -356,30 +447,30 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 13, "metadata": { - "id": "ysLRIWM3HGFv", "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "5f6d80b2-be9a-4462-a0d4-06981b811f7e" + "id": "ysLRIWM3HGFv", + "outputId": "517efa9f-f85a-4de4-eaa2-fbf45ad9e325" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "clbweokzu0hup07znaag28k3r\n", + "clci3wcf60nfu070kctyy1n4t\n", "\n" ] } @@ -419,7 +510,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 14, "metadata": { "id": "ZCjNJBTSKONt" }, @@ -429,7 +520,22 @@ " tools=[\n", " 
Tool(tool=Tool.Type.BBOX, name=\"bbox_video\"),\n", " Tool(tool=Tool.Type.POINT, name=\"point_video\"),\n", - " Tool(tool=Tool.Type.LINE, name=\"line_video_frame\")\n", + " Tool(tool=Tool.Type.LINE, name=\"line_video_frame\"),\n", + " Tool(\n", + " tool=Tool.Type.BBOX, name=\"bbox_class\",\n", + " classifications=[\n", + " Classification(\n", + " class_type=Classification.Type.RADIO, \n", + " instructions=\"bbox_radio\", \n", + " scope = Classification.Scope.INDEX,\n", + " options=[\n", + " Option(value=\"bbox_radio_answer_1\"),\n", + " Option(value=\"bbox_radio_answer_2\"),\n", + " Option(value=\"bbox_radio_answer_3\")\n", + " ]\n", + " )\n", + " ]\n", + " )\n", " ],\n", " classifications=[ \n", " Classification(\n", @@ -448,7 +554,22 @@ " Option(value=\"first_radio_answer\"),\n", " Option(value=\"second_radio_answer\")\n", " ]\n", - " ) \n", + " ),\n", + " Classification(\n", + " class_type=Classification.Type.RADIO, \n", + " instructions=\"radio_question_nested\",\n", + " options=[\n", + " Option(\"first_radio_question\",\n", + " options=[\n", + " Classification(\n", + " class_type=Classification.Type.RADIO,\n", + " instructions=\"sub_question_radio\",\n", + " options=[Option(\"sub_answer\")]\n", + " )\n", + " ]\n", + " )\n", + " ] \n", + " ) \n", " ] \n", ")\n", "\n", @@ -469,7 +590,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 15, "metadata": { "id": "wPLSHXrQPfHh" }, @@ -516,11 +637,11 @@ "cell_type": "code", "execution_count": 16, "metadata": { - "id": "UhqBD3gAYCAg", "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "eb929f63-44af-40d9-e98e-72eb14a782ab" + "id": "UhqBD3gAYCAg", + "outputId": "b1917de7-e584-4ee6-e795-869e15646765" }, "outputs": [ { @@ -529,11 +650,11 @@ "text": [ "Batch: \n" ] } @@ -578,7 +699,55 @@ }, { "cell_type": "code", - "execution_count": 12, + "source": [ + "## For nested frame base classifications we need to pass a featureSchemaId instead of the name. 
\n", + "## We are working towards implementing the \"name\" format for frame base classifciations\n", + "\n", + "features = project.ontology().normalized\n", + "\n", + "for i in features['tools']:\n", + " print(i)\n", + " if i['name'] == 'bbox_class':\n", + " ## Classification feature schema id\n", + " class_feature_schema_id = i['classifications'][0]['featureSchemaId']\n", + " ## Answer feature schema id (select one of the answers)\n", + " class_options_feature_schema_id = i['classifications'][0]['options'][0]['featureSchemaId']\n", + "\n", + " ## Update the original annotation with the schema ids\n", + " for frame in frame_bbox_with_checklist_subclass_ndjson['segments']:\n", + " for k in frame['keyframes']:\n", + " k['classifications'][0].update(\n", + " {'schemaId': class_feature_schema_id , \n", + " 'answer': {'schemaId': class_options_feature_schema_id}\n", + " }\n", + " )\n", + " " + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "4V7BXb-XfFCY", + "outputId": "2ee58656-eb62-4f31-9e3f-3e5db727e44d" + }, + "id": "4V7BXb-XfFCY", + "execution_count": 51, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "{'schemaNodeId': 'clci3z94n0n6d073ub87k7k0v', 'featureSchemaId': 'clci3z94n0n6c073ufr1ud763', 'required': False, 'name': 'bbox_video', 'tool': 'rectangle', 'color': '#ff0000', 'archived': 0, 'classifications': []}\n", + "{'schemaNodeId': 'clci3z94n0n6f073u5gz9enfy', 'featureSchemaId': 'clci3z94n0n6e073u5u06aowr', 'required': False, 'name': 'point_video', 'tool': 'point', 'color': '#7fff00', 'archived': 0, 'classifications': []}\n", + "{'schemaNodeId': 'clci3z94n0n6h073ugpe62urq', 'featureSchemaId': 'clci3z94n0n6g073u6n0205k3', 'required': False, 'name': 'line_video_frame', 'tool': 'line', 'color': '#00ffff', 'archived': 0, 'classifications': []}\n", + "{'schemaNodeId': 'clci3z94n0n6r073u8m7ie885', 'featureSchemaId': 'clci3z94n0n6i073uhcjf6xli', 'required': False, 'name': 'bbox_class', 'tool': 'rectangle', 'color': '#7f00ff', 'archived': 0, 'classifications': [{'schemaNodeId': 'clci3z94n0n6q073u7kerbw3x', 'featureSchemaId': 'clci3z94n0n6j073uh032ajsd', 'archived': 0, 'required': False, 'instructions': 'bbox_radio', 'name': 'bbox_radio', 'type': 'radio', 'options': [{'schemaNodeId': 'clci3z94n0n6l073u3zwsdoav', 'featureSchemaId': 'clci3z94n0n6k073u215fhvbf', 'label': 'bbox_radio_answer_1', 'value': 'bbox_radio_answer_1'}, {'schemaNodeId': 'clci3z94n0n6n073ufsdchpyk', 'featureSchemaId': 'clci3z94n0n6m073uf0odgc96', 'label': 'bbox_radio_answer_2', 'value': 'bbox_radio_answer_2'}, {'schemaNodeId': 'clci3z94n0n6p073ubli8c9f2', 'featureSchemaId': 'clci3z94n0n6o073ua030ampv', 'label': 'bbox_radio_answer_3', 'value': 'bbox_radio_answer_3'}]}]}\n" + ] + } + ] + }, + { + "cell_type": "code", + "execution_count": 52, "metadata": { "id": "qnFTqR6eZ3mE" }, @@ -590,7 +759,9 @@ " bbox_annotation_ndjson,\n", " polyline_frame_annotation_ndjson, \n", " frame_checklist_classification_ndjson, \n", - " global_radio_classification_ndjson\n", + " global_radio_classification_ndjson,\n", + " nested_classification,\n", + " frame_bbox_with_checklist_subclass_ndjson\n", " ]: \n", " annotations.update({\n", " 'uuid' : str(uuid.uuid4()),\n", @@ -605,17 +776,17 @@ { "cell_type": "code", "source": [ - " label_ndjson" + "label_ndjson" ], "metadata": { - "id": "YlQUVJ17Ow0-", "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "d46ec73e-9e6a-4264-8e4a-56ae0e5f3edb" + "id": "ZNQAjFculCTl", + "outputId": "1e32c870-234b-4b0e-dd9d-ad993a87607a" 
}, - "id": "YlQUVJ17Ow0-", - "execution_count": 14, + "id": "ZNQAjFculCTl", + "execution_count": 53, "outputs": [ { "output_type": "execute_result", @@ -624,8 +795,8 @@ "[{'name': 'point_video',\n", " 'segments': [{'keyframes': [{'frame': 17,\n", " 'point': {'x': 660.134, 'y': 407.926}}]}],\n", - " 'uuid': '75cb8bb5-b844-4b16-97b9-be6c0e92bc85',\n", - " 'dataRow': {'id': 'clbweokzu0hup07znaag28k3r'}},\n", + " 'uuid': '0a8ebf1d-abb7-4eb0-b4e5-6fa72e2249e2',\n", + " 'dataRow': {'id': 'clci3wcf60nfu070kctyy1n4t'}},\n", " {'name': 'bbox_video',\n", " 'segments': [{'keyframes': [{'frame': 13,\n", " 'bbox': {'top': 146.0, 'left': 98.0, 'height': 382.0, 'width': 341.0}},\n", @@ -636,8 +807,8 @@ " 'left': 98.0,\n", " 'height': 382.0,\n", " 'width': 341.0}}]}],\n", - " 'uuid': '36fb5752-177a-46bc-9531-240eb402877d',\n", - " 'dataRow': {'id': 'clbweokzu0hup07znaag28k3r'}},\n", + " 'uuid': 'cd992918-7ba4-484e-840e-d00fe043342b',\n", + " 'dataRow': {'id': 'clci3wcf60nfu070kctyy1n4t'}},\n", " {'name': 'line_video_frame',\n", " 'segments': [{'keyframes': [{'frame': 5,\n", " 'line': [{'x': 680, 'y': 100},\n", @@ -654,23 +825,44 @@ " {'keyframes': [{'frame': 24,\n", " 'line': [{'x': 300, 'y': 310}, {'x': 330, 'y': 430}]},\n", " {'frame': 45, 'line': [{'x': 600, 'y': 810}, {'x': 900, 'y': 930}]}]}],\n", - " 'uuid': 'a51507b2-7781-4d1a-aa50-b6f7e45be7b9',\n", - " 'dataRow': {'id': 'clbweokzu0hup07znaag28k3r'}},\n", + " 'uuid': '6b55c42d-e32d-44ca-944e-32d5173ee8f7',\n", + " 'dataRow': {'id': 'clci3wcf60nfu070kctyy1n4t'}},\n", " {'name': 'checklist_class',\n", " 'answer': [{'name': 'first_checklist_answer',\n", " 'frames': [{'start': 29, 'end': 35}, {'start': 48, 'end': 65}]},\n", " {'name': 'second_checklist_answer',\n", " 'frames': [{'start': 29, 'end': 35}, {'start': 48, 'end': 65}]}],\n", - " 'uuid': 'b375757b-d881-4bd3-b948-c5c676d6bd62',\n", - " 'dataRow': {'id': 'clbweokzu0hup07znaag28k3r'}},\n", + " 'uuid': 'c7740bbf-c7f9-4c8f-a00b-9dbe21f1ec81',\n", + " 'dataRow': {'id': 'clci3wcf60nfu070kctyy1n4t'}},\n", " {'name': 'radio_class_global',\n", " 'answer': {'name': 'first_radio_answer'},\n", - " 'uuid': 'a19ab316-dc97-4014-bbd7-aff0d28a2b13',\n", - " 'dataRow': {'id': 'clbweokzu0hup07znaag28k3r'}}]" + " 'uuid': '5603c24f-7567-4af1-9ca8-df289dcef1ea',\n", + " 'dataRow': {'id': 'clci3wcf60nfu070kctyy1n4t'}},\n", + " {'name': 'radio_question_nested',\n", + " 'answer': {'name': 'first_radio_question'},\n", + " 'classifications': [{'name': 'sub_question_radio',\n", + " 'answer': {'name': 'sub_answer'}}],\n", + " 'uuid': 'cb0532c5-5b24-41a6-94bd-1ff619931ddf',\n", + " 'dataRow': {'id': 'clci3wcf60nfu070kctyy1n4t'}},\n", + " {'name': 'bbox_class',\n", + " 'segments': [{'keyframes': [{'frame': 10,\n", + " 'bbox': {'top': 1366.0, 'left': 662.0, 'height': 1688.0, 'width': 876.0},\n", + " 'classifications': [{'schemaId': 'clci3z94n0n6j073uh032ajsd',\n", + " 'answer': {'schemaId': 'clci3z94n0n6k073u215fhvbf'}}]},\n", + " {'frame': 11,\n", + " 'bbox': {'top': 1366.0, 'left': 662.0, 'height': 1688.0, 'width': 876.0},\n", + " 'classifications': [{'schemaId': 'clci3z94n0n6j073uh032ajsd',\n", + " 'answer': {'schemaId': 'clci3z94n0n6k073u215fhvbf'}}]},\n", + " {'frame': 13,\n", + " 'bbox': {'top': 1366.0, 'left': 662.0, 'height': 1688.0, 'width': 876.0},\n", + " 'classifications': [{'schemaId': 'clci3z94n0n6j073uh032ajsd',\n", + " 'answer': {'schemaId': 'clci3z94n0n6k073u215fhvbf'}}]}]}],\n", + " 'uuid': '1e6c9151-2934-4c0a-a4d9-7b68da1996db',\n", + " 'dataRow': {'id': 'clci3wcf60nfu070kctyy1n4t'}}]" ] }, "metadata": 
{}, - "execution_count": 14 + "execution_count": 53 } ] }, @@ -687,23 +879,23 @@ }, { "cell_type": "markdown", - "source": [ - "#### Model-Assisted Labeling (MAL)" - ], "metadata": { "id": "duR8GYczNCmy" }, + "source": [ + "#### Model-Assisted Labeling (MAL)" + ], "id": "duR8GYczNCmy" }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 50, "metadata": { - "id": "entire-community", "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "91d1175c-128c-4888-98e4-a815b7040642" + "id": "entire-community", + "outputId": "791b9ec6-07c5-437d-fedc-84d4fb990247" }, "outputs": [ { @@ -731,37 +923,24 @@ }, { "cell_type": "markdown", - "source": [ - "#### Label Import" - ], "metadata": { "id": "OCilDEz_wxpb" }, + "source": [ + "#### Label Import" + ], "id": "OCilDEz_wxpb" }, { "cell_type": "code", - "source": [ - "upload_job_label_import = LabelImport.create_from_objects(\n", - " client = client,\n", - " project_id = project.uid, \n", - " name = \"label_import_job-\" + str(uuid.uuid4()),\n", - " labels=label_ndjson\n", - ")\n", - "\n", - "upload_job_label_import.wait_until_done();\n", - "print(\"Errors:\", upload_job_label_import.errors)\n", - "print(\" \")" - ], + "execution_count": 54, "metadata": { - "id": "si-6kQ4mwUBO", "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "6f766cc3-28cb-4237-b3a6-12c196a461a7" + "id": "si-6kQ4mwUBO", + "outputId": "dd243939-6e73-4308-f72e-2770a0ff3a2d" }, - "id": "si-6kQ4mwUBO", - "execution_count": 17, "outputs": [ { "output_type": "stream", @@ -771,31 +950,44 @@ " \n" ] } - ] + ], + "source": [ + "upload_job_label_import = LabelImport.create_from_objects(\n", + " client = client,\n", + " project_id = project.uid, \n", + " name = \"label_import_job-\" + str(uuid.uuid4()),\n", + " labels=label_ndjson\n", + ")\n", + "\n", + "upload_job_label_import.wait_until_done();\n", + "print(\"Errors:\", upload_job_label_import.errors)\n", + "print(\" \")" + ], + "id": "si-6kQ4mwUBO" }, { "cell_type": "markdown", - "source": [ - "### Optional deletions for cleanup" - ], "metadata": { "id": "jdMmQxoVNP6q" }, + "source": [ + "### Optional deletions for cleanup" + ], "id": "jdMmQxoVNP6q" }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "yPZaLM74LFhB" + }, + "outputs": [], "source": [ "# Delete Project\n", "# project.delete()\n", "# dataset.delete()" ], - "metadata": { - "id": "yPZaLM74LFhB" - }, - "id": "yPZaLM74LFhB", - "execution_count": null, - "outputs": [] + "id": "yPZaLM74LFhB" } ], "metadata": { From 3a23d5d8823d059b10ed4d65c5b137fcc995755a Mon Sep 17 00:00:00 2001 From: Andrea Ovalle <74880762+ovalle15@users.noreply.github.com> Date: Wed, 4 Jan 2023 16:06:06 -0500 Subject: [PATCH 4/5] final updates --- examples/annotation_import/video.ipynb | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/annotation_import/video.ipynb b/examples/annotation_import/video.ipynb index 12e0fc630..148b41b47 100644 --- a/examples/annotation_import/video.ipynb +++ b/examples/annotation_import/video.ipynb @@ -50,7 +50,6 @@ " * Polygons \n", " * Segmentation masks\n", " * Free form text classifications\n", - " * tool nested classification\n", "\n", "Please note that this list of unsupported annotations only refers to limitations for importing annotations. For example, when using the Labelbox editor, segmentation masks can be created and edited on video assets." 
],

From da3b94b90a55c53ac6851543ab777e0758be7fc8 Mon Sep 17 00:00:00 2001 From: Andrea Ovalle <74880762+ovalle15@users.noreply.github.com> Date: Wed, 4 Jan 2023 18:06:22 -0500 Subject: [PATCH 5/5] Text to explain usage of schema ids --- examples/annotation_import/video.ipynb | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/examples/annotation_import/video.ipynb b/examples/annotation_import/video.ipynb index 148b41b47..f2ba46337 100644 --- a/examples/annotation_import/video.ipynb +++ b/examples/annotation_import/video.ipynb @@ -372,6 +372,12 @@ "cell_type": "code", "source": [ "########## Frame Classifications ##########\n", + "\n", + "# Frame-based nested classifications do not support using the feature's name to reference ontology features.\n", + "# For this case only, we use the classification's featureSchemaId and the answer's featureSchemaId.\n", + "# We will update the annotation object with these featureSchemaIds in Step 5, after creating the ontology in Step 2.\n", + "\n", + "\n", "frame_bbox_with_checklist_subclass_ndjson = {\n", " \"name\": \"bbox_class\",\n", " \"segments\": [{\n", @@ -700,7 +706,6 @@ "cell_type": "code", "source": [ "## For nested frame base classifications we need to pass a featureSchemaId instead of the name. \n", - "## We are working towards implementing the \"name\" format for frame base classifciations\n", "\n", "features = project.ontology().normalized\n", "\n",
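The traversal in the cell above hard-codes the lookup for `bbox_class`. Under the assumption that the normalized ontology keeps the `tools → classifications → options` shape shown in the printed output earlier (each level carrying a `featureSchemaId`), the same lookup can be sketched as a small reusable helper:

```python
def schema_ids_for(normalized, tool_name, answer_index=0):
    """Return the featureSchemaIds of a tool's first nested classification
    and one of its answer options."""
    for tool in normalized["tools"]:
        if tool["name"] == tool_name:
            classification = tool["classifications"][0]
            answer = classification["options"][answer_index]
            return classification["featureSchemaId"], answer["featureSchemaId"]
    raise ValueError(f"tool not found: {tool_name}")

# Usage against the ontology created in Step 2.
class_id, answer_id = schema_ids_for(project.ontology().normalized, "bbox_class")
```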