from io import BytesIO
from typing import Dict, Any, Tuple
import uuid

import numpy as np
from labelbox import Client
from PIL import Image
from skimage import measure


def create_boxes_ndjson(datarow_id: str, schema_id: str, top: float, left: float,
                        bottom: float, right: float) -> Dict[str, Any]:
    """
    Creates a bounding box annotation in ndjson format.
    * https://docs.labelbox.com/data-model/en/index-en#bounding-box

    Args:
        datarow_id (str): id of the data_row (in this case image) to add this annotation to
        schema_id (str): id of the bbox tool in the current ontology
        top, left, bottom, right (float): pixel coordinates of the bbox
    Returns:
        ndjson representation of a bounding box
    """
    return {
        "uuid": str(uuid.uuid4()),
        "schemaId": schema_id,
        "dataRow": {
            "id": datarow_id
        },
        "bbox": {
            "top": int(top),
            "left": int(left),
            "height": int(bottom - top),
            "width": int(right - left)
        }
    }


def create_polygon_ndjson(datarow_id: str, schema_id: str,
                          segmentation_mask: np.ndarray) -> Dict[str, Any]:
    """
    Creates a polygon annotation in ndjson format.
    * https://docs.labelbox.com/data-model/en/index-en#polygon

    Args:
        datarow_id (str): id of the data_row (in this case image) to add this annotation to
        schema_id (str): id of the polygon tool in the current ontology
        segmentation_mask (np.ndarray): segmentation mask of size (image_h, image_w)
            - The mask is converted into a polygon since polygons aren't directly inferred.
    Returns:
        ndjson representation of a polygon
    """
    # find_contours returns each contour as an (N, 2) array of (row, col) coordinates.
    contours = measure.find_contours(segmentation_mask, 0.5)
    # Only the first contour is used, so complex polygons (objects with holes or
    # multiple disconnected parts) will not be represented correctly.
    pts = contours[0].astype(np.int32)
    # Swap (row, col) to (col, row), i.e. the (x, y) order expected by Labelbox.
    pts = np.roll(pts, 1, axis=-1)
    pts = [{'x': int(x), 'y': int(y)} for x, y in pts]
    return {
        "uuid": str(uuid.uuid4()),
        "schemaId": schema_id,
        "dataRow": {
            "id": datarow_id
        },
        "polygon": pts
    }


def create_mask_ndjson(client: Client, datarow_id: str, schema_id: str,
                       segmentation_mask: np.ndarray,
                       color: Tuple[int, int, int]) -> Dict[str, Any]:
    """
    Creates a segmentation mask annotation for a single object in the image.
    * https://docs.labelbox.com/data-model/en/index-en#segmentation-mask

    Args:
        client (labelbox.Client): labelbox client used for uploading the seg mask to cloud storage
        datarow_id (str): id of the data_row (in this case image) to add this annotation to
        schema_id (str): id of the segmentation tool in the current ontology
        segmentation_mask (np.ndarray): binary segmentation mask of size (image_h, image_w)
        color (Tuple[int, int, int]): rgb color used to convert the binary mask into a 3D colorized mask
    Returns:
        ndjson representation of a segmentation mask
    """
    # Scale the binary (h, w) mask by each color channel to build an (h, w, 3) image,
    # then cast to uint8 so PIL can encode it as a PNG.
    colorize = np.concatenate(
        [segmentation_mask[..., np.newaxis] * c for c in color], axis=2)
    img_bytes = BytesIO()
    Image.fromarray(colorize.astype(np.uint8)).save(img_bytes, format="PNG")
    # Use your own signed urls in production so that you can re-sign the data;
    # uploading through the client is just to make the demo work.
    url = client.upload_data(content=img_bytes.getvalue(), sign=True)
    return {
        "uuid": str(uuid.uuid4()),
        "schemaId": schema_id,
        "dataRow": {
            "id": datarow_id
        },
        "mask": {
            "instanceURI": url,
            "colorRGB": color
        }
    }


def create_point_ndjson(datarow_id: str, schema_id: str, top: float, left: float,
                        bottom: float, right: float) -> Dict[str, Any]:
    """
    Creates a point annotation in ndjson format.
    * https://docs.labelbox.com/data-model/en/index-en#point

    Args:
        datarow_id (str): id of the data_row (in this case image) to add this annotation to
        schema_id (str): id of the point tool in the current ontology
        top, left, bottom, right (float): pixel coordinates of the bbox
            - The model doesn't directly predict points, so we use the centroid of the predicted bounding box.
    Returns:
        ndjson representation of a point
    """
    return {
        "uuid": str(uuid.uuid4()),
        "schemaId": schema_id,
        "dataRow": {
            "id": datarow_id
        },
        "point": {
            "x": int((left + right) / 2.),
            "y": int((top + bottom) / 2.),
        }
    }
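

# Minimal usage sketch, not part of the original helpers: the data row id and schema
# ids below are placeholders and the mask/box coordinates are made up. In the Labelbox
# model-assisted labeling flow, dicts like these are collected into a list and uploaded
# to a project (e.g. via Project.upload_annotations in the labelbox SDK).
# create_mask_ndjson is omitted here because it requires a live Client for the upload.
if __name__ == "__main__":
    fake_mask = np.zeros((64, 64), dtype=np.uint8)
    fake_mask[16:48, 16:48] = 1

    annotations = [
        create_boxes_ndjson("<datarow-id>", "<bbox-schema-id>", top=16, left=16,
                            bottom=48, right=48),
        create_polygon_ndjson("<datarow-id>", "<polygon-schema-id>", fake_mask),
        create_point_ndjson("<datarow-id>", "<point-schema-id>", top=16, left=16,
                            bottom=48, right=48),
    ]
    print(annotations)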