diff --git a/examples/annotation_import/image.ipynb b/examples/annotation_import/image.ipynb
index a0f7f317d..425383d00 100644
--- a/examples/annotation_import/image.ipynb
+++ b/examples/annotation_import/image.ipynb
@@ -85,6 +85,8 @@
  "metadata": {},
  "source": [
  "import uuid\n",
+ "import requests\n",
+ "import base64\n",
  "import numpy as np\n",
  "import labelbox as lb\n",
  "import labelbox.types as lb_types"
@@ -297,7 +299,7 @@
  "bbox_annotation = lb_types.ObjectAnnotation(\n",
  " name=\"bounding_box\", # must match your ontology feature\"s name\n",
  " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=1690, y=977), # x = left, y = top \n",
+ " start=lb_types.Point(x=1690, y=977), # x = left, y = top\n",
  " end=lb_types.Point(x=1915, y=1307), # x= left + width , y = top + height\n",
  " ))\n",
  "\n",
@@ -330,7 +332,7 @@
  "bbox_with_radio_subclass_annotation = lb_types.ObjectAnnotation(\n",
  " name=\"bbox_with_radio_subclass\",\n",
  " value=lb_types.Rectangle(\n",
- " start=lb_types.Point(x=541, y=933), # x = left, y = top \n",
+ " start=lb_types.Point(x=541, y=933), # x = left, y = top\n",
  " end=lb_types.Point(x=871, y=1124), # x= left + width , y = top + height\n",
  " ),\n",
  " classifications=[\n",
@@ -373,7 +375,7 @@
  "source": [
  "# Python annotation\n",
  "polygon_annotation = lb_types.ObjectAnnotation(\n",
- " name=\"polygon\", # must match your ontology feature\"s name \n",
+ " name=\"polygon\", # must match your ontology feature\"s name\n",
  " value=lb_types.Polygon( # Coordinates for the vertices of your polygon\n",
  " points=[\n",
  " lb_types.Point(x=1489.581, y=183.934),\n",
@@ -430,23 +432,25 @@
  {
  "metadata": {},
  "source": [
- "# Identifying what values in the numpy array correspond to the mask annotation\n",
- "color = (255, 255, 255)\n",
- "mask_data = lb_types.MaskData(url=\"https://storage.googleapis.com/labelbox-datasets/image_sample_data/raster_seg.png\")\n",
+ "### Raster Segmentation (Byte string array)\n",
+ "url = \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/raster_seg.png\"\n",
+ "response = requests.get(url)\n",
  "\n",
- "# Python annotation\n",
+ "mask_data = lb_types.MaskData(im_bytes=response.content) # You can also use \"url\" instead of \"im_bytes\" to pass the PNG mask URL.\n",
  "mask_annotation = lb_types.ObjectAnnotation(\n",
- " name = \"mask\", # must match your ontology feature\"s name\n",
- " value=lb_types.Mask(mask=mask_data, color=color),\n",
+ " name=\"mask\",\n",
+ " value=lb_types.Mask(\n",
+ " mask=mask_data,\n",
+ " color=(255, 255, 255))\n",
  ")\n",
  "\n",
- "# NDJSON\n",
+ "# NDJSON: use instanceURI, as byte arrays are not fully supported.\n",
  "mask_annotation_ndjson = {\n",
  " \"name\": \"mask\",\n",
  " \"classifications\": [],\n",
- " \"mask\": {\"instanceURI\": \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/raster_seg.png\",\n",
+ " \"mask\": {\"instanceURI\": url,\n",
  " \"colorRGB\": (255, 255, 255)}\n",
- "}"
+ "}\n"
  ],
  "cell_type": "code",
  "outputs": [],
@@ -591,7 +595,7 @@
  " type=lb_types.Relationship.Type.UNIDIRECTIONAL,\n",
  " ))\n",
  "\n",
- "## Only supported for MAL imports \n",
+ "## Only supported for MAL imports\n",
  "uuid_source = str(uuid.uuid4())\n",
  "uuid_target = str(uuid.uuid4())\n",
  "\n",
@@ -855,7 +859,7 @@
  {
  "metadata": {},
  "source": [
- "ndjson_label = []\n",
+ "label_ndjson = []\n",
  "annotations = [\n",
  " radio_annotation_ndjson,\n",
  " nested_radio_annotation_ndjson,\n",
@@ -870,15 +874,15 @@
  " polyline_annotation_ndjson,\n",
  " bbox_source_ndjson,\n",
  " bbox_target_ndjson,\n",
- " relationship_ndjson, ## Only supported for MAL imports \n",
+ " relationship_ndjson, ## Only supported for MAL imports\n",
  "]\n",
  "for annotation in annotations:\n",
  " annotation.update({\n",
  " \"dataRow\": {\n",
  " \"globalKey\": global_key\n",
- " },\n",
+ " }\n",
  " })\n",
- " ndjson_label.append(annotation)"
+ " label_ndjson.append(annotation)"
  ],
  "cell_type": "code",
  "outputs": [],
@@ -911,7 +915,7 @@
  ")\n",
  "upload_job.wait_until_done()\n",
  "\n",
- "print(f\"Errors: {upload_job.errors}\", )\n",
+ "print(f\"Errors: {upload_job.errors}\")\n",
  "print(f\"Status of uploads: {upload_job.statuses}\")"
  ],
  "cell_type": "code",
@@ -928,14 +932,14 @@
  {
  "metadata": {},
  "source": [
- "# Uncomment if relationships are not being imported. \n",
- "# Relationships will be supported for label import in the near future. \n",
+ "# Relationships are not supported with LabelImport.\n",
+ "# For this demo, run either MAL or Ground Truth import, not both.\n",
  "\n",
  "# Upload label for this data row in project\n",
  "# upload_job = lb.LabelImport.create_from_objects(\n",
- "# client = client, \n",
- "# project_id = project.uid, \n",
- "# name=\"label_import_job\"+str(uuid.uuid4()), \n",
+ "# client = client,\n",
+ "# project_id = project.uid,\n",
+ "# name=\"label_import_job\"+str(uuid.uuid4()),\n",
  "# labels=label)\n",
  "\n",
  "# print(\"Errors:\", upload_job.errors)\n",
@@ -954,6 +958,13 @@
  "cell_type": "code",
  "outputs": [],
  "execution_count": null
+ },
+ {
+ "metadata": {},
+ "source": [],
+ "cell_type": "code",
+ "outputs": [],
+ "execution_count": null
  }
  ]
 }
\ No newline at end of file
diff --git a/examples/annotation_import/video.ipynb b/examples/annotation_import/video.ipynb
index 3d378b47d..07909de4f 100644
--- a/examples/annotation_import/video.ipynb
+++ b/examples/annotation_import/video.ipynb
@@ -69,7 +69,9 @@
  "source": [
  "import labelbox as lb\n",
  "import labelbox.types as lb_types\n",
- "import uuid"
+ "import uuid\n",
+ "import base64\n",
+ "import requests"
  ],
  "cell_type": "code",
  "outputs": [],
@@ -108,7 +110,7 @@
  "\n",
  "# Confidence scores are not supported for frame specific bounding box annotations and VideoObjectAnnotation\n",
  "\n",
- "# bbox dimensions \n",
+ "# bbox dimensions\n",
  "bbox_dm = {\n",
  " \"top\":617,\n",
  " \"left\":1371,\n",
@@ -119,32 +121,22 @@
  " \"height\":419,\n",
  " \"width\":505\n",
  "}\n",
  "\n",
  "# Python Annotation\n",
  "bbox_annotation = [\n",
  " lb_types.VideoObjectAnnotation(\n",
- " name = \"bbox_video\", \n",
+ " name = \"bbox_video\",\n",
  " keyframe=True,\n",
  " frame=13,\n",
  " segment_index=0,\n",
  " value = lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]), # x = left, y = top \n",
+ " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]), # x = left, y = top\n",
  " end=lb_types.Point(x=bbox_dm[\"left\"] + bbox_dm[\"width\"], y=bbox_dm[\"top\"] + bbox_dm[\"height\"]), # x= left + width , y = top + height\n",
  " )\n",
  " ),\n",
  " lb_types.VideoObjectAnnotation(\n",
- " name = \"bbox_video\", \n",
- " keyframe=True,\n",
- " frame=15,\n",
- " segment_index=0,\n",
- " value = lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n",
- " end=lb_types.Point(x=bbox_dm[\"left\"] + bbox_dm[\"width\"], y=bbox_dm[\"top\"] + bbox_dm[\"height\"]),\n",
- " )\n",
- " ),\n",
- " lb_types.VideoObjectAnnotation(\n",
- " name = \"bbox_video\", \n",
+ " name = \"bbox_video\",\n",
  " keyframe=True,\n",
  " frame=19,\n",
  " segment_index=0,\n",
  " value = lb_types.Rectangle(\n",
- " start=lb_types.Point(x=bbox_dm[\"left\"], \n",
+ " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n",
y=bbox_dm[\"top\"]),\n", " end=lb_types.Point(x=bbox_dm[\"left\"] + bbox_dm[\"width\"], y=bbox_dm[\"top\"] + bbox_dm[\"height\"]),\n", " )\n", " )\n", @@ -158,15 +150,11 @@ " \"keyframes\" : [\n", " {\n", " \"frame\": 13,\n", - " \"bbox\" : bbox_dm \n", - " },\n", - " {\n", - " \"frame\": 15,\n", - " \"bbox\" : bbox_dm \n", + " \"bbox\" : bbox_dm\n", " },\n", " {\n", " \"frame\": 19,\n", - " \"bbox\" : bbox_dm \n", + " \"bbox\" : bbox_dm\n", " }\n", " ]\n", " }\n", @@ -194,7 +182,7 @@ "\n", "# NDJSON\n", "point_annotation_ndjson = {\n", - " \"name\": \"point_video\", \n", + " \"name\": \"point_video\",\n", " \"segments\": [{\n", " \"keyframes\": [{\n", " \"frame\": 17,\n", @@ -203,7 +191,7 @@ " \"y\": 407.926\n", " }\n", " }]\n", - " }] \n", + " }]\n", "}" ], "cell_type": "code", @@ -219,55 +207,55 @@ "polyline_annotation = [\n", " lb_types.VideoObjectAnnotation(\n", " name = \"line_video_frame\",\n", - " keyframe=True, \n", + " keyframe=True,\n", " frame=5,\n", " segment_index=0,\n", - " value=lb_types.Line( \n", + " value=lb_types.Line(\n", " points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]\n", " )\n", " ),\n", " lb_types.VideoObjectAnnotation(\n", " name = \"line_video_frame\",\n", - " keyframe=True, \n", + " keyframe=True,\n", " frame=12,\n", " segment_index=0,\n", - " value=lb_types.Line( \n", + " value=lb_types.Line(\n", " points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]\n", " )\n", " ),\n", " lb_types.VideoObjectAnnotation(\n", " name = \"line_video_frame\",\n", - " keyframe=True, \n", + " keyframe=True,\n", " frame=20,\n", " segment_index=0,\n", - " value=lb_types.Line( \n", + " value=lb_types.Line(\n", " points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]\n", " )\n", " ),\n", " lb_types.VideoObjectAnnotation(\n", " name = \"line_video_frame\",\n", - " keyframe=True, \n", + " keyframe=True,\n", " frame=24,\n", " segment_index=1,\n", - " value=lb_types.Line( \n", + " value=lb_types.Line(\n", " points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]\n", " )\n", " ),\n", " lb_types.VideoObjectAnnotation(\n", " name = \"line_video_frame\",\n", - " keyframe=True, \n", + " keyframe=True,\n", " frame=45,\n", " segment_index=1,\n", - " value=lb_types.Line( \n", + " value=lb_types.Line(\n", " points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]\n", " )\n", " )\n", - " \n", + "\n", "]\n", "\n", "# NDJSON\n", "polyline_frame_annotation_ndjson = {\n", - " \"name\": \"line_video_frame\", \n", + " \"name\": \"line_video_frame\",\n", " \"segments\": [\n", " {\n", " \"keyframes\": [\n", @@ -351,13 +339,13 @@ "# Python Annotation\n", "radio_annotation = [\n", " lb_types.VideoClassificationAnnotation(\n", - " name=\"radio_class\", \n", + " name=\"radio_class\",\n", " frame=9,\n", " segment_index=0,\n", " value=lb_types.Radio(answer = lb_types.ClassificationAnswer(name = \"first_radio_answer\"))\n", " ),\n", " lb_types.VideoClassificationAnnotation(\n", - " name=\"radio_class\", \n", + " name=\"radio_class\",\n", " frame=15,\n", " segment_index=0,\n", " value=lb_types.Radio(answer = lb_types.ClassificationAnswer(name = \"first_radio_answer\"))\n", @@ -378,7 +366,7 @@ " )\n", " ),\n", " lb_types.VideoClassificationAnnotation(\n", - " name=\"checklist_class\", \n", + " name=\"checklist_class\",\n", " frame=35,\n", " segment_index=0,\n", " value=lb_types.Checklist(\n", @@ -390,8 +378,8 @@ " )\n", " ),\n", " lb_types.VideoClassificationAnnotation(\n", - " name=\"checklist_class\", \n", - " frame=39, \n", + " 
name=\"checklist_class\",\n", + " frame=39,\n", " segment_index=1,\n", " value=lb_types.Checklist(\n", " answer = [\n", @@ -402,12 +390,12 @@ " )\n", " ),\n", " lb_types.VideoClassificationAnnotation(\n", - " name=\"checklist_class\", \n", - " frame=45, \n", + " name=\"checklist_class\",\n", + " frame=45,\n", " segment_index=1,\n", " value=lb_types.Checklist(\n", " answer = [\n", - " \n", + "\n", " lb_types.ClassificationAnswer(\n", " name = \"second_checklist_answer\"\n", " )\n", @@ -419,17 +407,17 @@ "\n", "## NDJSON\n", "frame_radio_classification_ndjson = {\n", - " \"name\": \"radio_class\", \n", + " \"name\": \"radio_class\",\n", " \"answer\": { \"name\": \"first_radio_answer\", \"frames\": [{\"start\": 9, \"end\": 15}]}\n", "}\n", "\n", "## frame specific\n", "frame_checklist_classification_ndjson = {\n", - " \"name\": \"checklist_class\", \n", + " \"name\": \"checklist_class\",\n", " \"answer\": [\n", " { \"name\": \"first_checklist_answer\" , \"frames\": [{\"start\": 29, \"end\": 35 }]},\n", - " { \"name\": \"second_checklist_answer\", \"frames\": [{\"start\": 39, \"end\": 45 }]} \n", - " ] \n", + " { \"name\": \"second_checklist_answer\", \"frames\": [{\"start\": 39, \"end\": 45 }]}\n", + " ]\n", "}" ], "cell_type": "code", @@ -439,7 +427,7 @@ { "metadata": {}, "source": [ - "##### Global Classifications ####### \n", + "##### Global Classifications #######\n", "\n", "# Python Annotation\n", "## For global classifications use ClassificationAnnotation\n", @@ -449,12 +437,12 @@ ")]\n", "\n", "global_checklist_annotation=[lb_types.ClassificationAnnotation(\n", - " name=\"checklist_class_global\", \n", + " name=\"checklist_class_global\",\n", " value=lb_types.Checklist(\n", " answer = [\n", " lb_types.ClassificationAnswer(\n", " name = \"first_checklist_answer\"\n", - " ), \n", + " ),\n", " lb_types.ClassificationAnswer(\n", " name = \"second_checklist_answer\"\n", " )\n", @@ -464,17 +452,17 @@ "\n", "# NDJSON\n", "global_radio_classification_ndjson = {\n", - " \"name\": \"radio_class_global\", \n", + " \"name\": \"radio_class_global\",\n", " \"answer\": { \"name\": \"first_radio_answer\"}\n", "}\n", "\n", "\n", "global_checklist_classification_ndjson = {\n", - " \"name\": \"checklist_class_global\", \n", + " \"name\": \"checklist_class_global\",\n", " \"answer\": [\n", " { \"name\": \"first_checklist_answer\" },\n", - " { \"name\": \"second_checklist_answer\"} \n", - " ] \n", + " { \"name\": \"second_checklist_answer\"}\n", + " ]\n", "}" ], "cell_type": "code", @@ -484,7 +472,7 @@ { "metadata": {}, "source": [ - "########## Nested Global Classification ########### \n", + "########## Nested Global Classification ###########\n", "\n", "# Python Annotation\n", "nested_radio_annotation =[lb_types.ClassificationAnnotation(\n", @@ -567,25 +555,25 @@ " \"width\": 341.0\n", "}\n", "\n", - "# Python Annotation \n", + "# Python Annotation\n", "frame_bbox_with_checklist_subclass_annotation = [\n", " lb_types.VideoObjectAnnotation(\n", - " name = \"bbox_class\", \n", + " name = \"bbox_class\",\n", " keyframe=True,\n", " frame=10,\n", " segment_index=0,\n", " value = lb_types.Rectangle(\n", - " start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]), # x = left, y = top \n", + " start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]), # x = left, y = top\n", " end=lb_types.Point(x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"], y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"]), # x= left + width , y = top + height\n", " )\n", " ),\n", " lb_types.VideoObjectAnnotation(\n", - " name = 
\"bbox_class\", \n", + " name = \"bbox_class\",\n", " keyframe=True,\n", " frame=11,\n", " segment_index=0,\n", " value = lb_types.Rectangle(\n", - " start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]), \n", + " start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]),\n", " end=lb_types.Point(x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"], y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"]),\n", " ),\n", " classifications=[\n", @@ -597,12 +585,12 @@ " ]\n", " ),\n", " lb_types.VideoObjectAnnotation(\n", - " name = \"bbox_class\", \n", + " name = \"bbox_class\",\n", " keyframe=True,\n", " frame=13,\n", " segment_index=0,\n", " value = lb_types.Rectangle(\n", - " start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]), \n", + " start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]),\n", " end=lb_types.Point(x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"], y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"]),\n", " ),\n", " classifications=[\n", @@ -623,7 +611,7 @@ " \"frame\": 10,\n", " \"bbox\": bbox_dm2\n", " },\n", - " { \n", + " {\n", " \"frame\": 11,\n", " \"bbox\": bbox_dm2,\n", " \"classifications\": [\n", @@ -633,7 +621,7 @@ " }\n", " ]\n", " },\n", - " { \n", + " {\n", " \"frame\": 13,\n", " \"bbox\": bbox_dm2,\n", " \"classifications\": [\n", @@ -655,37 +643,83 @@ { "metadata": {}, "source": [ - "##### Raster Segmentation ########\n", - "\n", - "instance_uri = \"https://storage.googleapis.com/labelbox-datasets/video-sample-data/mask_example.png\"\n", + "### Raster Segmentation (Byte string array)\n", "\n", + "url = \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/color_mask.png\"\n", + "response = requests.get(url)\n", "\n", "\n", - "video_mask_annotation=[\n", + "video_mask_annotation_bytes = [\n", " lb_types.VideoMaskAnnotation(\n", " frames=[\n", - " lb_types.MaskFrame(index=10, instance_uri=instance_uri)\n", + " lb_types.MaskFrame(\n", + " index=20,\n", + " im_bytes=response.content # Instead of bytes you could also pass an instance URI : instance_uri=url\n", + " )\n", " ],\n", " instances=[\n", - " lb_types.MaskInstance(color_rgb=(255,255,255), name=\"video_mask\")\n", - " ] \n", + " lb_types.MaskInstance(color_rgb=(255, 255, 1), name= \"video_mask\")\n", + " ]\n", " )\n", "]\n", "\n", - "video_mask_annotation_ndjson = {\n", - " \"masks\": {\n", - " \"frames\": [{\n", - " \"index\": 10,\n", - " \"instanceURI\": instance_uri\n", - " }],\n", - " \"instances\": [\n", - " {\n", - " \"colorRGB\": (255, 255, 255),\n", - " \"name\": \"video_mask\",\n", - " }\n", + "video_mask_annotation_bytes_2 = [\n", + " lb_types.VideoMaskAnnotation(\n", + " frames=[\n", + " lb_types.MaskFrame(\n", + " index=23,\n", + " im_bytes=response.content\n", + " ),\n", + " lb_types.MaskFrame(\n", + " index=20,\n", + " im_bytes=response.content\n", + " )\n", + " ],\n", + " instances=[\n", + " lb_types.MaskInstance(color_rgb=(255, 1, 1), name= \"video_mask\")\n", " ]\n", + " )\n", + "]\n", + "\n", + "img_bytes = base64.b64encode(response.content).decode('utf-8')\n", + "# NDJSON\n", + "video_mask_ndjson_bytes = {\n", + " 'masks': {\n", + " 'frames': [\n", + " {\n", + " \"index\" : 20,\n", + " \"imBytes\": img_bytes,\n", + " }\n", + " ],\n", + " 'instances': [\n", + " {\n", + " \"colorRGB\" : [255, 255, 1],\n", + " \"name\" : \"video_mask\"\n", + " }\n", + " ]\n", " }\n", - "}" + " }\n", + "\n", + "video_mask_ndjson_bytes_2 = {\n", + " 'masks': {\n", + " 'frames': [\n", + " {\n", + " \"index\" : 20,\n", + " \"imBytes\": img_bytes,\n", + " },\n", + " {\n", + " \"index\" : 23,\n", + 
" \"imBytes\": img_bytes,\n", + " }\n", + " ],\n", + " 'instances': [\n", + " {\n", + " \"colorRGB\" : [255, 1, 1],\n", + " \"name\" : \"video_mask\"\n", + " }\n", + " ]\n", + " }\n", + " }" ], "cell_type": "code", "outputs": [], @@ -709,6 +743,95 @@ "outputs": [], "execution_count": null }, + { + "metadata": {}, + "source": [ + "##### Multiple annotations in the same frame using a single tool #####\n", + "# Fist instance of bounding box ranging from frame 22 to 27\n", + "bbox_annotation_1 = [\n", + " lb_types.VideoObjectAnnotation(\n", + " name = \"bbox_video\",\n", + " keyframe=True,\n", + " frame=22,\n", + " segment_index=0,\n", + " value = lb_types.Rectangle(\n", + " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]), # x = left, y = top\n", + " end=lb_types.Point(x=bbox_dm[\"left\"] + bbox_dm[\"width\"], y=bbox_dm[\"top\"] + bbox_dm[\"height\"]), # x= left + width , y = top + height\n", + " )\n", + " ),\n", + " lb_types.VideoObjectAnnotation(\n", + " name = \"bbox_video\",\n", + " keyframe=True,\n", + " frame=27,\n", + " segment_index=0,\n", + " value = lb_types.Rectangle(\n", + " start=lb_types.Point(x=bbox_dm[\"left\"], y=bbox_dm[\"top\"]),\n", + " end=lb_types.Point(x=bbox_dm[\"left\"] + bbox_dm[\"width\"], y=bbox_dm[\"top\"] + bbox_dm[\"height\"]),\n", + " )\n", + " )\n", + "]\n", + "# Second instance of bounding box ranging from frame 22 to 27\n", + "bbox_annotation_2 = [\n", + " lb_types.VideoObjectAnnotation(\n", + " name = \"bbox_video\",\n", + " keyframe=True,\n", + " frame=22,\n", + " segment_index=0,\n", + " value = lb_types.Rectangle(\n", + " start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]),\n", + " end=lb_types.Point(x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"], y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"]),\n", + " )\n", + " ),\n", + " lb_types.VideoObjectAnnotation(\n", + " name = \"bbox_video\",\n", + " keyframe=True,\n", + " frame=27,\n", + " segment_index=0,\n", + " value = lb_types.Rectangle(\n", + " start=lb_types.Point(x=bbox_dm2[\"left\"], y=bbox_dm2[\"top\"]),\n", + " end=lb_types.Point(x=bbox_dm2[\"left\"] + bbox_dm2[\"width\"], y=bbox_dm2[\"top\"] + bbox_dm2[\"height\"]),\n", + " )\n", + " )\n", + "]\n", + "\n", + "# NDJSON example:\n", + "bbox_frame_annotation_ndjson = {\n", + " \"name\": \"bbox_video\",\n", + " \"segments\": [{\n", + " \"keyframes\": [\n", + " {\n", + " \"frame\": 22,\n", + " \"bbox\": bbox_dm\n", + " },\n", + " {\n", + " \"frame\": 27,\n", + " \"bbox\": bbox_dm2\n", + " }\n", + "\n", + " ]\n", + " }]\n", + "}\n", + "\n", + "bbox_frame_annotation_ndjson2 = {\n", + " \"name\": \"bbox_video\",\n", + " \"segments\": [{\n", + " \"keyframes\": [\n", + " {\n", + " \"frame\": 22,\n", + " \"bbox\": bbox_dm\n", + " },\n", + " {\n", + " \"frame\": 27,\n", + " \"bbox\": bbox_dm2\n", + " }\n", + " ]\n", + " }]\n", + " }\n" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null + }, { "metadata": {}, "source": [ @@ -726,17 +849,17 @@ { "metadata": {}, "source": [ - "global_key = \"sample-video-2.mp4\"\n", + "global_key = \"sample-video-jellyfish.mp4\"\n", "asset = {\n", - " \"row_data\": \"https://storage.googleapis.com/labelbox-datasets/video-sample-data/sample-video-2.mp4\", \n", + " \"row_data\": \"https://storage.googleapis.com/labelbox-datasets/video-sample-data/sample-video-2.mp4\",\n", " \"global_key\": global_key,\n", " \"media_type\": \"VIDEO\"\n", " }\n", "\n", "dataset = client.create_dataset(\n", - " name=\"video_demo_dataset\", \n", - " iam_integration=None # If this argument is removed, labelbox will 
- ") \n",
+ " name=\"video_demo_dataset\",\n",
+ " iam_integration=None # If this argument is removed, labelbox will use the default integration for your organization.\n",
+ ")\n",
  "task = dataset.create_data_rows([asset])\n",
  "task.wait_till_done()\n",
  "print(\"Errors :\",task.errors)\n",
@@ -783,12 +906,12 @@
  " ]\n",
  " )\n",
  " ],\n",
- " classifications=[ \n",
+ " classifications=[\n",
  " lb.Classification(\n",
  " class_type=lb.Classification.Type.CHECKLIST,\n",
  " name=\"checklist_class\",\n",
  " scope = lb.Classification.Scope.INDEX, ## Need to defined scope for frame classifications\n",
- " options=[ \n",
+ " options=[\n",
  " lb.Option(value=\"first_checklist_answer\"),\n",
  " lb.Option(value=\"second_checklist_answer\")\n",
  " ]\n",
@@ -797,7 +920,7 @@
  " class_type=lb.Classification.Type.RADIO,\n",
  " name=\"radio_class\",\n",
  " scope = lb.Classification.Scope.INDEX,\n",
- " options=[ \n",
+ " options=[\n",
  " lb.Option(value=\"first_radio_answer\"),\n",
  " lb.Option(value=\"second_radio_answer\")\n",
  " ]\n",
@@ -815,7 +938,7 @@
  " )\n",
  " ]\n",
  " )\n",
- " ] \n",
+ " ]\n",
  " ),\n",
  " lb.Classification(\n",
  " class_type=lb.Classification.Type.CHECKLIST,\n",
@@ -833,9 +956,9 @@
  " ]\n",
  " ),\n",
  " lb.Classification(\n",
- " class_type=lb.Classification.Type.RADIO, \n",
+ " class_type=lb.Classification.Type.RADIO,\n",
  " name=\"radio_class_global\",\n",
- " options=[ \n",
+ " options=[\n",
  " lb.Option(value=\"first_radio_answer\"),\n",
  " lb.Option(value=\"second_radio_answer\")\n",
  " ]\n",
@@ -852,11 +975,11 @@
  " class_type=lb.Classification.Type.TEXT,\n",
  " name=\"free_text\"\n",
  " )\n",
- " ] \n",
+ " ]\n",
  ")\n",
  "\n",
- "ontology = client.create_ontology(\"Ontology Video Annotations\", \n",
- " ontology_builder.asdict(), \n",
+ "ontology = client.create_ontology(\"Ontology Video Annotations\",\n",
+ " ontology_builder.asdict(),\n",
  " media_type=lb.MediaType.Video)"
  ],
  "cell_type": "code",
@@ -932,30 +1055,32 @@
  {
  "metadata": {},
  "source": [
- "labels = []\n",
+ "label = []\n",
  "annotations_list = [\n",
- " checklist_annotation, \n",
+ " checklist_annotation,\n",
  " radio_annotation,\n",
- " bbox_annotation, \n",
+ " bbox_annotation,\n",
  " frame_bbox_with_checklist_subclass_annotation,\n",
- " point_annotation, \n",
+ " bbox_annotation_1,\n",
+ " bbox_annotation_2,\n",
+ " point_annotation,\n",
  " polyline_annotation,\n",
  " global_checklist_annotation,\n",
  " global_radio_annotation,\n",
- " video_mask_annotation,\n",
  " nested_checklist_annotation,\n",
  " nested_radio_annotation,\n",
- " text_annotation\n",
+ " text_annotation,\n",
+ " video_mask_annotation_bytes,\n",
+ " video_mask_annotation_bytes_2\n",
  " ]\n",
  "\n",
- "flatten_list_annotations = [ann for ann_sublist in annotations_list for ann in ann_sublist] \n",
- "\n",
- "labels.append(\n",
- " lb_types.Label(\n",
- " data=lb_types.VideoData(global_key=global_key),\n",
- " annotations = flatten_list_annotations\n",
- " )\n",
- ")"
+ "for annotation in annotations_list:\n",
+ " label.append(\n",
+ " lb_types.Label(\n",
+ " data=lb_types.VideoData(global_key=global_key),\n",
+ " annotations = annotation\n",
+ " )\n",
+ " )\n"
  ],
  "cell_type": "code",
  "outputs": [],
@@ -981,20 +1106,26 @@
  "source": [
  "label_ndjson = []\n",
  "\n",
- "for annotation in [\n",
+ "annotations_list_ndjson = [\n",
  " point_annotation_ndjson,\n",
  " bbox_annotation_ndjson,\n",
- " polyline_frame_annotation_ndjson, \n",
- " frame_checklist_classification_ndjson, \n",
+ " polyline_frame_annotation_ndjson,\n",
+ " frame_checklist_classification_ndjson,\n",
  " frame_radio_classification_ndjson,\n",
  " nested_radio_annotation_ndjson,\n",
  " nested_checklist_annotation_ndjson,\n",
  " frame_bbox_with_checklist_subclass_annotation_ndjson,\n",
  " global_radio_classification_ndjson,\n",
  " global_checklist_classification_ndjson,\n",
- " video_mask_annotation_ndjson,\n",
- " text_annotation_ndjson\n",
- "]: \n",
+ " text_annotation_ndjson,\n",
+ " bbox_frame_annotation_ndjson,\n",
+ " bbox_frame_annotation_ndjson2,\n",
+ " video_mask_ndjson_bytes,\n",
+ " video_mask_ndjson_bytes_2,\n",
+ "\n",
+ "]\n",
+ "\n",
+ "for annotation in annotations_list_ndjson:\n",
  " annotation.update({\n",
  " \"dataRow\": {\n",
  " \"globalKey\": global_key\n",
@@ -1026,10 +1157,10 @@
  "source": [
  "# Upload MAL label for this data row in project\n",
  "upload_job_mal = lb.MALPredictionImport.create_from_objects(\n",
- " client = client, \n",
- " project_id = project.uid, \n",
- " name=\"mal_import_job-\" + str(uuid.uuid4()), \n",
- " predictions=labels)\n",
+ " client = client,\n",
+ " project_id = project.uid,\n",
+ " name=\"mal_import_job-\" + str(uuid.uuid4()),\n",
+ " predictions=label)\n",
  "\n",
  "upload_job_mal.wait_until_done()\n",
  "print(\"Errors:\", upload_job_mal.errors)\n",
@@ -1050,17 +1181,19 @@
  {
  "metadata": {},
  "source": [
- "upload_job_label_import = lb.LabelImport.create_from_objects(\n",
- " client = client,\n",
- " project_id = project.uid, \n",
- " name = \"label_import_job-\" + str(uuid.uuid4()),\n",
- " labels=labels\n",
- ")\n",
+ "# For this demo, run either MAL or Ground Truth import, not both.\n",
  "\n",
- "upload_job_label_import.wait_until_done()\n",
- "print(\"Errors:\", upload_job_label_import.errors)\n",
- "print(\"Status of uploads: \", upload_job_label_import.statuses)\n",
- "print(\" \")"
+ "# upload_job_label_import = lb.LabelImport.create_from_objects(\n",
+ "# client = client,\n",
+ "# project_id = project.uid,\n",
+ "# name = \"label_import_job-\" + str(uuid.uuid4()),\n",
+ "# labels=label\n",
+ "# )\n",
+ "\n",
+ "# upload_job_label_import.wait_until_done()\n",
+ "# print(\"Errors:\", upload_job_label_import.errors)\n",
+ "# print(\"Status of uploads: \", upload_job_label_import.statuses)\n",
+ "# print(\" \")"
  ],
  "cell_type": "code",
  "outputs": [],
@@ -1078,11 +1211,19 @@
  "source": [
  "# Delete Project\n",
  "# project.delete()\n",
- "# dataset.delete()\n"
+ "# dataset.delete()\n",
+ "\n"
  ],
  "cell_type": "code",
  "outputs": [],
  "execution_count": null
+ },
+ {
+ "metadata": {},
+ "source": [],
+ "cell_type": "code",
+ "outputs": [],
+ "execution_count": null
  }
  ]
 }
\ No newline at end of file
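
For quick reference, here is a minimal, self-contained sketch of the byte-string mask pattern both notebooks adopt in this change. Every call below (`MaskData(im_bytes=...)`, `MaskFrame(..., im_bytes=...)`, and the base64-encoded `imBytes` NDJSON field) is taken from the diffs above; the URL and color values are the notebooks' sample values, and the snippet is illustrative rather than part of the patch itself.

```python
# Illustrative sketch only; mirrors the byte-string mask pattern from the diffs above.
import base64

import requests
import labelbox.types as lb_types

url = "https://storage.googleapis.com/labelbox-datasets/image_sample_data/raster_seg.png"
png_bytes = requests.get(url).content  # raw PNG mask bytes

# Image: build the mask annotation from raw PNG bytes instead of a hosted URL.
image_mask_annotation = lb_types.ObjectAnnotation(
    name="mask",
    value=lb_types.Mask(mask=lb_types.MaskData(im_bytes=png_bytes),
                        color=(255, 255, 255)),
)

# Video: attach the same bytes to individual frames.
video_mask_annotation = lb_types.VideoMaskAnnotation(
    frames=[lb_types.MaskFrame(index=20, im_bytes=png_bytes)],
    instances=[lb_types.MaskInstance(color_rgb=(255, 255, 1), name="video_mask")],
)

# NDJSON cannot carry raw bytes, so they are base64-encoded into "imBytes".
video_mask_ndjson = {
    "masks": {
        "frames": [{"index": 20,
                    "imBytes": base64.b64encode(png_bytes).decode("utf-8")}],
        "instances": [{"colorRGB": [255, 255, 1], "name": "video_mask"}],
    }
}
```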