13 changes: 12 additions & 1 deletion README.md
@@ -889,7 +889,18 @@ Get tasks and export as YOLO format files.

```python
tasks = client.get_image_tasks(project="YOUR_PROJECT_SLUG")
client.export_yolo(tasks)
client.export_yolo(tasks, output_dir="YOUR_DIRECTORY")
```

Get tasks and export them as YOLO format files together with a classes.txt.
You can pass a fixed class list to control the contents of classes.txt and the class indices written to each annotation file.

```python
project_slug = "YOUR_PROJECT_SLUG"
tasks = client.get_image_tasks(project=project_slug)
annotations = client.get_annotations(project=project_slug)
classes = list(map(lambda annotation: annotation["value"], annotations))
client.export_yolo(tasks=tasks, classes=classes, output_dir="YOUR_DIRECTORY")
```
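
For reference, each line in an exported label file is the class index (the position of the annotation value in `classes`) followed by the normalized box geometry, truncated to seven decimals. A minimal sketch of that conversion for a single bbox, using made-up image dimensions, box points, and class names:

```python
# Sketch: deriving one YOLO label line from a bbox annotation (hypothetical values).
def truncate(n, decimals=7):
    # Truncate (not round), matching the exporter's behavior.
    multiplier = 10 ** decimals
    return int(n * multiplier) / multiplier

width, height = 1280, 720                     # image size from the task
xmin, ymin, xmax, ymax = 100, 200, 300, 400   # bbox corners in pixels
classes = ["dog", "bird"]

cx = truncate((xmin + xmax) / 2 / width)      # normalized box center x
cy = truncate((ymin + ymax) / 2 / height)     # normalized box center y
w = truncate((xmax - xmin) / width)           # normalized box width
h = truncate((ymax - ymin) / height)          # normalized box height

line = " ".join([str(classes.index("dog")), str(cx), str(cy), str(w), str(h)])
# -> "0 0.15625 0.4166666 0.15625 0.2777777"
```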

### Pascal VOC
54 changes: 35 additions & 19 deletions fastlabel/__init__.py
@@ -529,7 +529,8 @@ def create_video_classification_task(
"""
endpoint = "tasks/video/classification"
if not utils.is_video_supported_ext(file_path):
raise FastLabelInvalidException("Supported extensions are mp4.", 422)
raise FastLabelInvalidException(
"Supported extensions are mp4.", 422)
file = utils.base64_encode(file_path)
payload = {"project": project, "name": name, "file": file}
if status:
@@ -651,14 +652,17 @@ def export_coco(self, tasks: list, output_dir: str = os.path.join("output", "coc
with open(file_path, 'w') as f:
json.dump(coco, f, indent=4, ensure_ascii=False)

def export_yolo(self, tasks: list, output_dir: str = os.path.join("output", "yolo")) -> None:
def export_yolo(self, tasks: list, classes: list = [], output_dir: str = os.path.join("output", "yolo")) -> None:
"""
Convert tasks to YOLO format and export as files.
If you pass classes, classes.txt is generated from that list.
If not, classes.txt is generated from the passed tasks. (Annotation classes never used in your project are not exported.)

tasks is a list of tasks. (Required)
classes is a list of annotation values. e.g. ['dog', 'bird'] (Optional)
output_dir is the output directory (default: output/yolo). (Optional)
"""
annos, categories = converters.to_yolo(tasks)
annos, categories = converters.to_yolo(tasks, classes)
for anno in annos:
file_name = anno["filename"]
basename = utils.get_basename(file_name)
@@ -709,12 +713,12 @@ def export_labelme(self, tasks: list, output_dir: str = os.path.join("output", "
with open(file_path, 'w') as f:
json.dump(labelme, f, indent=4, ensure_ascii=False)


# Instance / Semantic Segmentation

def export_instance_segmentation(self, tasks: list, output_dir: str = os.path.join("output", "instance_segmentation"), pallete: List[int] = const.COLOR_PALETTE) -> None:
"""
Convert tasks to index color instance segmentation (PNG files).
Supports only bbox, polygon and segmentation annotation types. Hollowed points are not supported.
Supports only bbox, polygon and segmentation annotation types.
Supports up to 57 instances with the default color palette. Check const.COLOR_PALETTE for more details.

tasks is a list of tasks. (Required)
@@ -723,12 +727,13 @@ """
"""
tasks = converters.to_pixel_coordinates(tasks)
for task in tasks:
self.__export_index_color_image(task=task, output_dir=output_dir, pallete=pallete, is_instance_segmentation=True)

self.__export_index_color_image(
task=task, output_dir=output_dir, pallete=pallete, is_instance_segmentation=True)

def export_semantic_segmentation(self, tasks: list, output_dir: str = os.path.join("output", "semantic_segmentation"), pallete: List[int] = const.COLOR_PALETTE) -> None:
"""
Convert tasks to index color semantic segmentation (PNG files).
Supports only bbox, polygon and segmentation annotation types. Hollowed points are not supported.
Supports only bbox, polygon and segmentation annotation types.
Check const.COLOR_PALETTE for the color palette.

tasks is a list of tasks. (Required)
@@ -744,7 +749,8 @@ def export_semantic_segmentation(self, tasks: list, output_dir: str = os.path.jo

tasks = converters.to_pixel_coordinates(tasks)
for task in tasks:
self.__export_index_color_image(task=task, output_dir=output_dir, pallete=pallete, is_instance_segmentation=False, classes=classes)
self.__export_index_color_image(
task=task, output_dir=output_dir, pallete=pallete, is_instance_segmentation=False, classes=classes)

def __export_index_color_image(self, task: list, output_dir: str, pallete: List[int], is_instance_segmentation: bool = True, classes: list = []) -> None:
image = Image.new("RGB", (task["width"], task["height"]), 0)
@@ -753,28 +759,39 @@ def __export_index_color_image(self, task: list, output_dir: str, pallete: List[

index = 1
for annotation in task["annotations"]:
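# Instance segmentation gives each annotation its own index color;
# semantic segmentation uses the class's 1-based position in classes.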
color = index if is_instance_segmentation else classes.index(annotation["value"]) + 1
color = index if is_instance_segmentation else classes.index(
annotation["value"]) + 1
if annotation["type"] == AnnotationType.segmentation.value:
for region in annotation["points"]:
count = 0
for points in region:
cv_draw_points = self.__get_cv_draw_points(points)
if count == 0:
cv2.fillPoly(image, [cv_draw_points], color, lineType=cv2.LINE_8, shift=0)
cv_draw_points = self.__get_cv_draw_points(points)
cv2.fillPoly(
image, [cv_draw_points], color, lineType=cv2.LINE_8, shift=0)
else:
cv2.fillPoly(image, [cv_draw_points], 0, lineType=cv2.LINE_8, shift=0)
# Reverse hollow points for OpenCV because these points are counter-clockwise
cv_draw_points = self.__get_cv_draw_points(
utils.reverse_points(points))
cv2.fillPoly(
image, [cv_draw_points], 0, lineType=cv2.LINE_8, shift=0)
count += 1
elif annotation["type"] == AnnotationType.polygon.value:
cv_draw_points = self.__get_cv_draw_points(annotation["points"])
cv2.fillPoly(image, [cv_draw_points], color, lineType=cv2.LINE_8, shift=0)
cv_draw_points = self.__get_cv_draw_points(
annotation["points"])
cv2.fillPoly(image, [cv_draw_points], color,
lineType=cv2.LINE_8, shift=0)
elif annotation["type"] == AnnotationType.bbox.value:
cv_draw_points = self.__get_cv_draw_points(annotation["points"])
cv2.fillPoly(image, [cv_draw_points], color, lineType=cv2.LINE_8, shift=0)
cv_draw_points = self.__get_cv_draw_points(
annotation["points"])
cv2.fillPoly(image, [cv_draw_points], color,
lineType=cv2.LINE_8, shift=0)
else:
continue
index += 1

image_path = os.path.join(output_dir, utils.get_basename(task["name"]) + ".png")
image_path = os.path.join(
output_dir, utils.get_basename(task["name"]) + ".png")
os.makedirs(os.path.dirname(image_path), exist_ok=True)
image = Image.fromarray(image)
image = image.convert('P')
@@ -823,7 +840,6 @@ def __get_cv_draw_points(self, points: List[int]) -> List[int]:
cv_points.append((new_points[i * 2], new_points[i * 2 + 1]))
return np.array(cv_points)


# Annotation

def find_annotation(self, annotation_id: str) -> dict:
198 changes: 137 additions & 61 deletions fastlabel/converters.py
@@ -4,6 +4,7 @@
import copy
import geojson
import numpy as np
import math
from fastlabel.const import AnnotationType

# COCO
@@ -144,10 +145,12 @@ def __calc_area(annotation_type: str, points: list) -> float:
# YOLO


def to_yolo(tasks: list) -> tuple:
coco = to_coco(tasks)
yolo = __coco2yolo(coco)
return yolo
def to_yolo(tasks: list, classes: list) -> tuple:
if len(classes) == 0:
coco = to_coco(tasks)
return __coco2yolo(coco)
else:
return __to_yolo(tasks, classes)


def __coco2yolo(coco: dict) -> tuple:
@@ -198,6 +201,69 @@ def __coco2yolo(coco: dict) -> tuple:
return annos, categories


def __to_yolo(tasks: list, classes: list) -> tuple:
annos = []
for task in tasks:
if task["height"] == 0 or task["width"] == 0:
continue
objs = []
data = [{"annotation": annotation, "task": task, "classes": classes}
for annotation in task["annotations"]]
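# Convert annotations in parallel; executor.map yields results in the original annotation order.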
with ThreadPoolExecutor(max_workers=8) as executor:
results = executor.map(__get_yolo_annotation, data)
for result in results:
if not result:
continue
objs.append(" ".join(result))
anno = {
"filename": task["name"],
"object": objs
}
annos.append(anno)

categories = [{"name": val} for val in classes]

return annos, categories


def __get_yolo_annotation(data: dict) -> list:
annotation = data["annotation"]
points = annotation["points"]
annotation_type = annotation["type"]
value = annotation["value"]
classes = list(data["classes"])
task = data["task"]
if annotation_type != AnnotationType.bbox.value and annotation_type != AnnotationType.polygon.value:
return None
if not points or len(points) == 0:
return None
if annotation_type == AnnotationType.bbox.value and (int(points[0]) == int(points[2]) or int(points[1]) == int(points[3])):
return None
if value not in classes:
return None

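# YOLO coordinates are relative to the image size, so scale everything by 1/width and 1/height.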
dw = 1. / task["width"]
dh = 1. / task["height"]

bbox = __to_bbox(points)
xmin = bbox[0]
ymin = bbox[1]
xmax = bbox[0] + bbox[2]
ymax = bbox[1] + bbox[3]

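# Express the enclosing bbox as (center x, center y, width, height) before normalizing.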
x = (xmin + xmax) / 2
y = (ymin + ymax) / 2
w = xmax - xmin
h = ymax - ymin

x = str(_truncate(x * dw, 7))
y = str(_truncate(y * dh, 7))
w = str(_truncate(w * dw, 7))
h = str(_truncate(h * dh, 7))
category_index = str(classes.index(value))
return [category_index, x, y, w, h]


def _truncate(n, decimals=0) -> float:
multiplier = 10 ** decimals
return int(n * multiplier) / multiplier
@@ -207,10 +273,75 @@ def _truncate(n, decimals=0) -> float:


def to_pascalvoc(tasks: list) -> list:
coco = to_coco(tasks)
pascalvoc = __coco2pascalvoc(coco)
pascalvoc = []
for task in tasks:
if task["height"] == 0 or task["width"] == 0:
continue

pascal_objs = []
data = [{"annotation": annotation}
for annotation in task["annotations"]]
with ThreadPoolExecutor(max_workers=8) as executor:
results = executor.map(__get_pascalvoc_obj, data)

for result in results:
if not result:
continue
pascal_objs.append(result)

voc = {
"annotation": {
"filename": task["name"],
"size": {
"width": task["width"],
"height": task["height"],
"depth": 3,
},
"segmented": 0,
"object": pascal_objs
}
}
pascalvoc.append(voc)
return pascalvoc

def __get_pascalvoc_obj(data: dict) -> dict:
annotation = data["annotation"]
points = annotation["points"]
annotation_type = annotation["type"]
if annotation_type != AnnotationType.bbox.value and annotation_type != AnnotationType.polygon.value:
return None
if not points or len(points) == 0:
return None
if annotation_type == AnnotationType.bbox.value and (int(points[0]) == int(points[2]) or int(points[1]) == int(points[3])):
return None
bbox = __to_bbox(points)
x = bbox[0]
y = bbox[1]
w = bbox[2]
h = bbox[3]

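# Pascal VOC stores absolute pixel corner coordinates; math.floor keeps them integral.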
return {
"name": annotation["value"],
"pose": "Unspecified",
"truncated": __get_pascalvoc_tag_value(annotation, "truncated"),
"occluded": __get_pascalvoc_tag_value(annotation, "occluded"),
"difficult": __get_pascalvoc_tag_value(annotation, "difficult"),
"bndbox": {
"xmin": math.floor(x),
"ymin": math.floor(y),
"xmax": math.floor(x + w),
"ymax": math.floor(y + h),
},
}

def __get_pascalvoc_tag_value(annotation: dict, target_tag_name: str) -> int:
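# Flags such as truncated/occluded/difficult come from switch attributes with a matching key;
# a missing attribute defaults to 0.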
attributes = annotation["attributes"]
if not attributes:
return 0
related_attr = next(
(attribute for attribute in attributes if attribute["type"] == "switch" and attribute["key"] == target_tag_name), None)
return int(related_attr["value"]) if related_attr else 0


# labelme

@@ -388,58 +519,3 @@ def __get_pixel_coordinates(points: List[int or float]) -> List[int]:
new_points.append(int(prev_x + int(xdiff / mindiff * (i + 1))))
new_points.append(int(prev_y + int(ydiff / mindiff * (i + 1))))
return new_points

def __coco2pascalvoc(coco: dict) -> list:
pascalvoc = []

for image in coco["images"]:

# Get objects
objs = []
for annotation in coco["annotations"]:
if image["id"] != annotation["image_id"]:
continue
category = _get_category_by_id(
coco["categories"], annotation["category_id"])

x = annotation["bbox"][0]
y = annotation["bbox"][1]
w = annotation["bbox"][2]
h = annotation["bbox"][3]

obj = {
"name": category["name"],
"pose": "Unspecified",
"truncated": 0,
"difficult": 0,
"bndbox": {
"xmin": x,
"ymin": y,
"xmax": x + w,
"ymax": y + h,
},
}
objs.append(obj)

# get annotation
voc = {
"annotation": {
"filename": image["file_name"],
"size": {
"width": image["width"],
"height": image["height"],
"depth": 3,
},
"segmented": 0,
"object": objs
}
}
pascalvoc.append(voc)

return pascalvoc


def _get_category_by_id(categories: list, id_: str) -> str:
category = [
category for category in categories if category["id"] == id_][0]
return category
15 changes: 15 additions & 0 deletions fastlabel/utils.py
@@ -21,3 +21,18 @@ def get_basename(file_path: str) -> str:
path/to/file.jpg -> path/to/file
"""
return os.path.splitext(file_path)[0]


def reverse_points(points: list[int]) -> list[int]:
"""
e.g.)
[4, 5, 4, 9, 8, 9, 8, 5, 4, 5] => [4, 5, 8, 5, 8, 9, 4, 9, 4, 5]
"""
reversed_points = []
for index, _ in enumerate(points):
if index % 2 == 0:
reversed_points.insert(0, points[index + 1])
reversed_points.insert(0, points[index])
return reversed_points