diff --git a/README.md b/README.md
old mode 100644
new mode 100755
diff --git a/backend/database/annotations.py b/backend/database/annotations.py
old mode 100644
new mode 100755
index 1b9d3737..c1283f13
--- a/backend/database/annotations.py
+++ b/backend/database/annotations.py
@@ -1,8 +1,6 @@
 import imantics as im
 import json
-
 from mongoengine import *
-
 from .datasets import DatasetModel
 from .categories import CategoryModel
 from .events import Event
@@ -11,7 +9,7 @@ class AnnotationModel(DynamicDocument):
-    COCO_PROPERTIES = ["id", "image_id", "category_id", "segmentation",
+    COCO_PROPERTIES = ["id", "image_id", "category_id", "segmentation", "rle",
                        "iscrowd", "color", "area", "bbox", "metadata",
                        "keypoints", "isbbox"]
@@ -20,12 +18,13 @@ class AnnotationModel(DynamicDocument):
     category_id = IntField(required=True)
     dataset_id = IntField()

-    segmentation = ListField(default=[])
+    segmentation = ListField(default=[])  # segmentation in polygon format
+    rle = DictField(default={})  # segmentation in compressed RLE format
     area = IntField(default=0)
     bbox = ListField(default=[0, 0, 0, 0])
     iscrowd = BooleanField(default=False)
     isbbox = BooleanField(default=False)
-
+
     creator = StringField(required=True)
     width = IntField()
     height = IntField()
@@ -59,7 +58,6 @@ def __init__(self, image_id=None, **data):
         super(AnnotationModel, self).__init__(**data)

     def save(self, copy=False, *args, **kwargs):
-
         if self.dataset_id and not copy:
             dataset = DatasetModel.objects(id=self.dataset_id).first()
@@ -88,7 +86,7 @@ def mask(self):
         ]
         mask = cv2.fillPoly(mask, pts, 1)
         return mask
-
+
     def clone(self):
         """ Creates a clone """
         create = json.loads(self.to_json())
diff --git a/backend/database/datasets.py b/backend/database/datasets.py
old mode 100644
new mode 100755
index d761b7dc..3bd933e9
--- a/backend/database/datasets.py
+++ b/backend/database/datasets.py
@@ -64,7 +64,7 @@ def import_coco(self, coco_json):
             "name": task.name
         }

-    def export_coco(self, categories=None, style="COCO"):
+    def export_coco(self, categories=None, style="COCO", with_empty_images=False):

         from workers.tasks import export_annotations
@@ -78,14 +78,30 @@ def export_coco(self, categories=None, style="COCO"):
         )
         task.save()

-        cel_task = export_annotations.delay(task.id, self.id, categories)
+        cel_task = export_annotations.delay(task.id, self.id, categories, with_empty_images)

         return {
             "celery_id": cel_task.id,
             "id": task.id,
             "name": task.name
         }
-
+
+    def export_semantic_segmentation(self, categories=None):
+        from workers.tasks import export_semantic_segmentation
+        if categories is None or len(categories) == 0:
+            categories = self.categories
+        task = TaskModel(
+            name=f"Exporting semantic segmentation of {self.name}",
+            dataset_id=self.id,
+            group="Semantic Segmentation Export"
+        )
+        task.save()
+        cel_task = export_semantic_segmentation.delay(task.id, self.id, categories)
+        return {
+            "celery_id": cel_task.id,
+            "id": task.id,
+            "name": task.name
+        }
+
     def scan(self):
         from workers.tasks import scan_dataset
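Reviewer note: the new `rle` field holds the dict produced by `pycocotools.mask.encode`, with `counts` decoded from bytes so it can live in a MongoDB `DictField`. A minimal sketch of the shape of that value (the tiny mask is illustrative):

```python
import numpy as np
import pycocotools.mask as mask

# Toy 4x3 binary mask standing in for a full-image annotation mask
binary_mask = np.array([[0, 1, 1],
                        [0, 1, 1],
                        [0, 0, 0],
                        [0, 0, 0]], dtype=np.uint8)

# pycocotools expects a Fortran-ordered uint8 array
rle = mask.encode(np.asfortranarray(binary_mask))
rle['counts'] = rle['counts'].decode()  # bytes -> str before storing in the DictField

print(rle['size'])          # [4, 3], i.e. [height, width]
print(type(rle['counts']))  # <class 'str'>
```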
diff --git a/backend/webserver/api/annotations.py b/backend/webserver/api/annotations.py
old mode 100644
new mode 100755
diff --git a/backend/webserver/api/annotator.py b/backend/webserver/api/annotator.py
old mode 100644
new mode 100755
index b173f3ef..7ca0d365
--- a/backend/webserver/api/annotator.py
+++ b/backend/webserver/api/annotator.py
@@ -1,8 +1,13 @@
 import datetime
+import base64
+import io
+import pycocotools.mask as mask
+import numpy as np

 from flask_restplus import Namespace, Resource
 from flask_login import login_required, current_user
 from flask import request
+from PIL import Image

 from ..util import query_util, coco_util, profile, thumbnails
@@ -16,7 +21,6 @@

 api = Namespace('annotator', description='Annotator related operations')

-
 @api.route('/data')
 class AnnotatorData(Resource):
@@ -48,7 +52,6 @@ def post(self):

         current_user.update(preferences=data.get('user', {}))

-        annotated = False
         num_annotations = 0
         # Iterate every category passed in the data
         for category in data.get('categories', []):
@@ -108,25 +111,63 @@ def post(self):
                     )

                     paperjs_object = annotation.get('compoundPath', [])
-                    # Update paperjs if it exists
+                    area = 0
+                    bbox = []
+                    width = db_annotation.width
+                    height = db_annotation.height
                     if len(paperjs_object) == 2:
-
-                        width = db_annotation.width
-                        height = db_annotation.height
-
-                        # Generate coco formatted segmentation data
-                        segmentation, area, bbox = coco_util.\
-                            paperjs_to_coco(width, height, paperjs_object)
+                        # Store segmentation in compressed RLE format
+                        if annotation.get('raster', {}) != {}:
+                            area = annotation.get('area', 0)
+                            bbox = annotation.get('bbox')
+                            ann_x = int(bbox[0])
+                            ann_y = int(bbox[1])
+                            ann_height = int(bbox[2])
+                            ann_width = int(bbox[3])
+                            dataurl = annotation.get('raster')
+
+                            # Convert the base64 data URL to an RGBA image
+                            image_b64 = dataurl.split(",")[1]
+                            binary = io.BytesIO(base64.b64decode(image_b64))
+                            sub_image = Image.open(binary)
+                            sub_image = np.array(sub_image).reshape((ann_height, ann_width, 4))
+
+                            # Convert the RGBA image to a binary mask (each pixel is either 0 or 1)
+                            sub_binary_mask = np.sum(sub_image[:, :, :3], 2)
+                            sub_binary_mask[sub_binary_mask > 0] = 1
+
+                            # Insert the sub binary mask at its position in the full image
+                            full_binary_mask = np.zeros((height, width), np.uint8)
+                            # Handle annotations exceeding image borders
+                            y_0 = ann_y
+                            y_end = ann_y + ann_height
+                            x_0 = ann_x
+                            x_end = ann_x + ann_width
+                            full_binary_mask[y_0:y_end, x_0:x_end] = sub_binary_mask
+
+                            rle = mask.encode(np.asfortranarray(full_binary_mask.astype('uint8')))
+                            # Convert rle['counts'] from bytes to a string so it can be stored
+                            rle['counts'] = rle.get('counts').decode()
+                            db_annotation.update(
+                                set__rle=rle,
+                                set__iscrowd=True,
+                                set__segmentation=[]  # Clear segmentation when moving from polygon format to RLE
+                            )
+                        # Store segmentation in polygon format
+                        else:
+                            segmentation, area, bbox = coco_util.\
+                                paperjs_to_coco(width, height, paperjs_object)
+
+                            db_annotation.update(
+                                set__segmentation=segmentation
+                            )

                         db_annotation.update(
-                            set__segmentation=segmentation,
-                            set__area=area,
-                            set__isbbox=annotation.get('isbbox', False),
-                            set__bbox=bbox,
-                            set__paper_object=paperjs_object,
+                            set__area=area,
+                            set__isbbox=annotation.get('isbbox', False),
+                            set__bbox=bbox,
+                            set__paper_object=paperjs_object
                         )
-
                         if area > 0:
                             counted = True
@@ -142,10 +183,8 @@ def post(self):
         )
         thumbnails.generate_thumbnail(image_model)

-
         return {"success": True}

-
 @api.route('/data/<int:image_id>')
 class AnnotatorId(Resource):
@@ -190,18 +229,21 @@ def get(self, image_id):
         data['image']['previous'] = pre.id if pre else None
         data['image']['next'] = nex.id if nex else None

+        # Optimization: query all annotations of the image once, then group them by category
+        all_annotations = AnnotationModel.objects(image_id=image_id, deleted=False).exclude('events').all()
+
         for category in categories:
             category = query_util.fix_ids(category[1])
-
             category_id = category.get('id')
-            annotations = AnnotationModel.objects(image_id=image_id, category_id=category_id, deleted=False)\
-                .exclude('events').all()
+
+            annotations = []
+            for annotation in all_annotations:
+                if annotation['category_id'] == category_id:
+                    annotations.append(query_util.fix_ids(annotation))

             category['show'] = True
             category['visualize'] = False
-            category['annotations'] = [] if annotations is None else query_util.fix_ids(annotations)
+            category['annotations'] = [] if annotations is None else annotations
             data.get('categories').append(category)

         return data
-
-
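The raster branch above is the heart of the new RLE path: the client sends the brush mask as a base64 PNG data URL, and the server pastes it into a full-image mask before encoding. A condensed, standalone sketch of that round trip (the helper name is ours; the logic mirrors the handler above):

```python
import base64
import io

import numpy as np
import pycocotools.mask as mask
from PIL import Image


def raster_to_rle(dataurl, bbox, image_height, image_width):
    """Decode a base64 RGBA data URL into a full-image compressed RLE."""
    # The handler above reads bbox as [x, y, height, width]
    ann_x, ann_y, ann_height, ann_width = (int(v) for v in bbox)

    image_b64 = dataurl.split(",")[1]
    sub_image = Image.open(io.BytesIO(base64.b64decode(image_b64)))
    sub_image = np.array(sub_image).reshape((ann_height, ann_width, 4))

    # Any pixel with a non-zero RGB value counts as foreground
    sub_mask = (np.sum(sub_image[:, :, :3], 2) > 0).astype(np.uint8)

    # Paste the patch into an empty full-image mask
    full_mask = np.zeros((image_height, image_width), np.uint8)
    full_mask[ann_y:ann_y + ann_height, ann_x:ann_x + ann_width] = sub_mask

    rle = mask.encode(np.asfortranarray(full_mask))
    rle['counts'] = rle['counts'].decode()  # store as str, not bytes
    return rle
```

Worth confirming in review: `bbox[2]` is treated as the height and `bbox[3]` as the width, the reverse of COCO's `[x, y, width, height]` convention, so the client must be sending the box in this order.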
diff --git a/backend/webserver/api/datasets.py b/backend/webserver/api/datasets.py
old mode 100644
new mode 100755
index a601adb0..5e020a2b
--- a/backend/webserver/api/datasets.py
+++ b/backend/webserver/api/datasets.py
@@ -1,5 +1,5 @@
 from flask import request
-from flask_restplus import Namespace, Resource, reqparse
+from flask_restplus import Namespace, Resource, reqparse, inputs
 from flask_login import login_required, current_user
 from werkzeug.datastructures import FileStorage
 from mongoengine.errors import NotUniqueError
@@ -46,6 +46,7 @@
 export = reqparse.RequestParser()
 export.add_argument('categories', type=str, default=None, required=False, help='Ids of categories to export')
+export.add_argument('with_empty_images', type=inputs.boolean, default=False, required=False, help='Export with un-annotated images')

 update_dataset = reqparse.RequestParser()
 update_dataset.add_argument('categories', location='json', type=list, help="New list of categories")
@@ -179,6 +180,23 @@ def get(self, dataset_id):
         # Calculate annotation counts by category in this dataset
         category_count = dict()
         image_category_count = dict()
+
+        user_stats = dict()
+
+        for user in dataset.get_users():
+            user_annots = AnnotationModel.objects(dataset_id=dataset_id, deleted=False, creator=user.username)
+            image_count = dict()
+            for annot in user_annots:
+                image_count[annot.image_id] = image_count.get(annot.image_id, 0) + 1
+
+            user_stats[user.username] = {
+                "annotations": len(user_annots),
+                "images": len(image_count)
+            }
+
         for category in dataset.categories:

             # Calculate the annotation count in the current category in this dataset
@@ -207,7 +225,8 @@ def get(self, dataset_id):
                 'Time (ms) per Annotation': annotations.average('milliseconds') or 0
             },
             'categories': category_count,
-            'images_per_category': image_category_count
+            'images_per_category': image_category_count,
+            'users': user_stats
         }
         return stats
@@ -497,6 +516,7 @@ def get(self, dataset_id):

         args = export.parse_args()
         categories = args.get('categories')
+        with_empty_images = args.get('with_empty_images', False)

         if len(categories) == 0:
             categories = []
@@ -509,7 +529,7 @@ def get(self, dataset_id):
         if not dataset:
             return {'message': 'Invalid dataset ID'}, 400

-        return dataset.export_coco(categories=categories)
+        return dataset.export_coco(categories=categories, with_empty_images=with_empty_images)

     @api.expect(coco_upload)
     @login_required
@@ -524,6 +544,28 @@ def post(self, dataset_id):

         return dataset.import_coco(json.load(coco))

+@api.route('/<int:dataset_id>/semanticSegmentation')
+class DatasetSemanticSegmentation(Resource):
+
+    @api.expect(export)
+    @login_required
+    def get(self, dataset_id):
+
+        args = export.parse_args()
+        categories = args.get('categories')
+
+        if len(categories) == 0:
+            categories = []
+
+        if len(categories) > 0 or isinstance(categories, str):
+            categories = [int(c) for c in categories.split(',')]
+
+        dataset = DatasetModel.objects(id=dataset_id).first()
+
+        if not dataset:
+            return {'message': 'Invalid dataset ID'}, 400
+
+        return dataset.export_semantic_segmentation(categories=categories)

 @api.route('/<int:dataset_id>/coco')
 class DatasetCoco(Resource):
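For reference, this is how a client would call the two export endpoints; a sketch assuming a local server and the default `/api/dataset` prefix:

```python
import requests

BASE = "http://localhost:5000/api/dataset"  # assumed host and route prefix

# COCO export, now optionally including images that have no annotations
resp = requests.get(f"{BASE}/1/export", params={
    "categories": "2,5",          # comma-separated category ids
    "with_empty_images": "true",  # parsed server-side by inputs.boolean
})
print(resp.json())  # {"celery_id": ..., "id": ..., "name": ...}

# Kick off the new semantic segmentation export (returns a task handle, not the zip)
resp = requests.get(f"{BASE}/1/semanticSegmentation", params={"categories": "2,5"})
print(resp.json())
```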
diff --git a/backend/webserver/api/exports.py b/backend/webserver/api/exports.py
old mode 100644
new mode 100755
index f168bb35..1bf66c79
--- a/backend/webserver/api/exports.py
+++ b/backend/webserver/api/exports.py
@@ -1,13 +1,13 @@
 from flask import send_file
-from flask_restplus import Namespace, Resource, reqparse
+from flask_restplus import Namespace, Resource
 from flask_login import login_required, current_user

 import datetime
+
 from ..util import query_util

 from database import (
     ExportModel,
-    DatasetModel,
     fix_ids
 )
@@ -66,6 +66,8 @@ def get(self, export_id):
         if not current_user.can_download(dataset):
             return {"message": "You do not have permission to download the dataset's annotations"}, 403

+        if len(list(export.tags)) > 0 and list(export.tags)[0] == "SemanticSeg":
+            return send_file(export.path, attachment_filename=f"{dataset.name}-{'-'.join(export.tags)}.zip", as_attachment=True)
+
-        return send_file(export.path, attachment_filename=f"{dataset.name.encode('utf-8')}-{'-'.join(export.tags).encode('utf-8')}.json", as_attachment=True)
+        return send_file(export.path, attachment_filename=f"{dataset.name}-{'-'.join(export.tags)}.json", as_attachment=True)
diff --git a/backend/webserver/api/images.py b/backend/webserver/api/images.py
old mode 100644
new mode 100755
index db3122b9..396dfda8
--- a/backend/webserver/api/images.py
+++ b/backend/webserver/api/images.py
@@ -2,23 +2,25 @@
 from flask_login import login_required, current_user
 from werkzeug.datastructures import FileStorage
 from flask import send_file
+from mongoengine.errors import NotUniqueError
+import pycocotools.mask as mask

 from ..util import query_util, coco_util

 from database import (
+    fix_ids,
     ImageModel,
+    CategoryModel,
     DatasetModel,
     AnnotationModel
 )
-
-from PIL import Image
+import numpy as np
+from PIL import Image, ImageColor

 import datetime
 import os
 import io
-
 api = Namespace('image', description='Image related operations')

-
 image_all = reqparse.RequestParser()
 image_all.add_argument('fields', required=False, type=str)
 image_all.add_argument('page', default=1, type=int)
@@ -95,7 +97,10 @@ def post(self):
         image.close()
         pil_image.close()
-        db_image = ImageModel.create_from_path(path, dataset_id).save()
+        try:
+            db_image = ImageModel.create_from_path(path, dataset_id).save()
+        except NotUniqueError:
+            db_image = ImageModel.objects.get(path=path)
         return db_image.id
@@ -196,3 +201,81 @@ def get(self, image_id):

         return coco_util.get_image_coco(image_id)

+@api.route('/semanticSegmentation/<int:image_id>')
+class ImageSemanticSegmentation(Resource):
+
+    @api.expect(image_download)
+    @login_required
+    def get(self, image_id):
+        """ Returns semantic segmentation image by image's ID """
+        args = image_download.parse_args()
+        as_attachment = args.get('asAttachment')
+
+        image = ImageModel.objects(id=image_id)\
+            .only(*ImageModel.COCO_PROPERTIES)
+
+        if image is None:
+            return {'success': False}, 400
+
+        image = fix_ids(image)[0]
+        # Image dimensions
+        width = image.get('width')
+        height = image.get('height')
+
+        dataset = DatasetModel.objects(id=image.get('dataset_id')).first()
+
+        bulk_categories = CategoryModel.objects(id__in=dataset.categories, deleted=False) \
+            .only(*CategoryModel.COCO_PROPERTIES)
+
+        db_annotations = AnnotationModel.objects(deleted=False, image_id=image_id)
+
+        final_image_array = np.zeros((height, width))
+        category_index = 1
+        # category_colors = [black, color1, color2, ...]; found_categories = [1, 3]
+        # when the found annotations belong to the first and third categories in bulk_categories
+        category_colors = [(0, 0, 0)]
+        found_categories = []
+        # Loop to generate the semantic segmentation mask:
+        # e.g. pixels belonging to the category of index 2 get the value 2
+        for category in fix_ids(bulk_categories):
+            category_colors.append(ImageColor.getcolor(category.get('color'), 'RGB'))
+            category_annotations = db_annotations\
+                .filter(category_id=category.get('id'))\
+                .only(*AnnotationModel.COCO_PROPERTIES)
+
+            if category_annotations.count() == 0:
+                category_index += 1
+                continue
+
+            found_categories.append(category_index)
+            category_annotations = fix_ids(category_annotations)
+            for annotation in category_annotations:
+                has_polygon_segmentation = len(annotation.get('segmentation', [])) > 0
+                has_rle_segmentation = annotation.get('rle', {}) != {}
+                if has_rle_segmentation:
+                    compressed_rle = annotation.get('rle')
+                    bin_mask = mask.decode(compressed_rle)
+                    idx = bin_mask == 1
+                    final_image_array[idx] = category_index
+                elif has_polygon_segmentation:
+                    bin_mask = coco_util.get_bin_mask(list(annotation.get('segmentation')), height, width)
+                    idx = bin_mask == 1
+                    final_image_array[idx] = category_index
+            category_index += 1
+
+        # Transform the 2D label array into an RGB image
+        r = np.zeros_like(final_image_array).astype(np.uint8)
+        g = np.zeros_like(final_image_array).astype(np.uint8)
+        b = np.zeros_like(final_image_array).astype(np.uint8)
+
+        for l in found_categories:
+            idx = final_image_array == l
+            x, y, z = category_colors[l]
+            r[idx] = x
+            g[idx] = y
+            b[idx] = z
+
+        rgb = np.stack([r, g, b], axis=2)
+
+        image_io = io.BytesIO()
+        Image.fromarray(rgb.astype('uint8')).save(image_io, "PNG", quality=95)
+        image_io.seek(0)
+        return send_file(image_io, attachment_filename=image.get('file_name'), as_attachment=as_attachment)
\ No newline at end of file
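Design note on the per-channel loop at the end of the endpoint: since `category_colors` is an index-aligned palette, the same RGB image could be produced with a single fancy-indexing lookup. A possible simplification, not what the patch does:

```python
import numpy as np

# palette[i] is the RGB color of label i; label 0 stays black
palette = np.array(category_colors, dtype=np.uint8)  # shape: (num_labels + 1, 3)
rgb = palette[final_image_array.astype(np.intp)]     # shape: (height, width, 3)
```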
diff --git a/backend/webserver/util/coco_util.py b/backend/webserver/util/coco_util.py
old mode 100644
new mode 100755
index 5540deb3..5a6923e3
--- a/backend/webserver/util/coco_util.py
+++ b/backend/webserver/util/coco_util.py
@@ -1,7 +1,7 @@
 import pycocotools.mask as mask
 import numpy as np
-import shapely
 from shapely.geometry import LineString, Point
+import skimage.draw as sd

 from database import (
     fix_ids,
@@ -10,8 +10,6 @@
     CategoryModel,
     AnnotationModel
 )
-
-
 def paperjs_to_coco(image_width, image_height, paperjs):
     """
     Given a paperjs CompoundPath, converts path into coco segmentation format based on children paths
@@ -24,10 +22,10 @@ def paperjs_to_coco(image_width, image_height, paperjs):
     assert image_width > 0
     assert image_height > 0
     assert len(paperjs) == 2
-
     # Compute segmentation
     # paperjs points are relative to the center, so we must shift them relative to the top left.
-    segments = []
+    segments_with_area = []
+    pts_or_lines = []
     center = [image_width/2, image_height/2]

     if paperjs[0] == "Path":
@@ -44,8 +42,8 @@ def paperjs_to_coco(image_width, image_height, paperjs):

         for point in child_segments:
-            # Cruve
-            if len(point) == 4:
+            # Curve or segment with handles
+            if len(point) == 4 or len(point) == 3:
                 point = point[0]

             # Point
@@ -58,27 +56,28 @@ def paperjs_to_coco(image_width, image_height, paperjs):
         if sum(segments_to_add) == 0:
             continue

-        if len(segments_to_add) == 4:
+        if len(segments_to_add) == 4 or len(segments_to_add) == 2:
             # len 4 means this is a line with no width; it contributes
             # no area to the mask, and if we include it, coco will treat
             # it instead as a bbox (and throw an error)
+            pts_or_lines.append(segments_to_add)
             continue

         num_widths = segments_to_add.count(image_width)
         num_heights = segments_to_add.count(image_height)
+
         if num_widths + num_heights == len(segments_to_add):
             continue

-        segments.append(segments_to_add)
+        segments_with_area.append(segments_to_add)

-    if len(segments) < 1:
-        return [], 0, [0, 0, 0, 0]
-
-    area, bbox = get_segmentation_area_and_bbox(
-        segments, image_height, image_width)
-
-    return segments, area, bbox
+    if len(segments_with_area) < 1:
+        return pts_or_lines, 0, None
+    else:
+        area, bbox = get_segmentation_area_and_bbox(
+            segments_with_area, image_height, image_width)
+        return segments_with_area + pts_or_lines, area, bbox

 def paperjs_to_coco_cliptobounds(image_width, image_height, paperjs): # todo: there's lots of edge cases to this. It needs a different solution or many many if statements :P
     """
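Note that `paperjs_to_coco` now returns `None` as the bbox when only zero-area primitives were drawn, so callers need to handle that case. The area/bbox computation it delegates to is the standard pycocotools sequence; a quick illustration on a toy polygon (coordinates made up):

```python
import pycocotools.mask as mask

# One polygon as a flat [x0, y0, x1, y1, ...] list: a square from (5, 5) to (15, 15)
segmentation = [[5, 5, 15, 5, 15, 15, 5, 15]]

rles = mask.frPyObjects(segmentation, 100, 100)  # args: (polygons, height, width)
rle = mask.merge(rles)

print(mask.area(rle))    # roughly 100 pixels, up to rasterization rounding
print(mask.toBbox(rle))  # [x, y, width, height]
```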
@@ -137,7 +136,6 @@ def paperjs_to_coco_cliptobounds(image_width, image_height, paperjs):
         p = i % len(child_segments)
         point = child_segments[p]
-        # print('point:', point, flush=True)
         # Cruve
         if len(point) == 4:
             point = point[0]
@@ -191,9 +189,42 @@ def get_segmentation_area_and_bbox(segmentation, image_height, image_width):
     # Convert into rle
     rles = mask.frPyObjects(segmentation, image_height, image_width)
     rle = mask.merge(rles)
-
     return mask.area(rle), mask.toBbox(rle)

+def get_bin_mask(segmentation, image_height, image_width):
+    """
+    Computes the binary mask of an annotation in polygon format.
+    It separates out segmentations in line and point format (they are not supported by pycocotools).
+    :return: the binary mask as an np.array
+    """
+    bin_mask = np.zeros((image_height, image_width))
+    points = []
+    lines = []
+    polygons = []
+
+    for segment in segmentation:
+        if len(segment) == 2:
+            points.append(segment)
+        elif len(segment) == 4:
+            lines.append(segment)
+        else:
+            polygons.append(segment)
+
+    if len(polygons) != 0:
+        # Convert into rle
+        rles = mask.frPyObjects(polygons, image_height, image_width)
+        rle = mask.merge(rles)
+        # Extract the binary mask
+        bin_mask = mask.decode(rle)
+
+    for point in points:
+        bin_mask[round(point[1])][round(point[0])] = 1
+
+    for line in lines:
+        rr, cc = sd.line(round(line[0]), round(line[1]), round(line[2]), round(line[3]))
+        bin_mask[cc, rr] = 1
+
+    return bin_mask

 def get_annotations_iou(annotation_a, annotation_b):
     """
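A usage sketch for the new `get_bin_mask`, mixing the three segment kinds it dispatches on: a 2-value point, a 4-value line, and a longer polygon (toy coordinates; the import path may differ by deployment):

```python
from webserver.util.coco_util import get_bin_mask  # assumed import path

segmentation = [
    [3, 4],                    # point: set directly on the mask
    [0, 0, 9, 9],              # line: rasterized with skimage.draw.line
    [2, 2, 8, 2, 8, 8, 2, 8],  # polygon: handled by pycocotools frPyObjects + decode
]
bin_mask = get_bin_mask(segmentation, image_height=10, image_width=10)
print(bin_mask.sum())  # number of foreground pixels
```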
@@ -245,17 +276,20 @@ def get_image_coco(image_id):
         category_annotations = fix_ids(category_annotations)

         for annotation in category_annotations:
-
-            has_segmentation = len(annotation.get('segmentation', [])) > 0
+            has_polygon_segmentation = len(annotation.get('segmentation', [])) > 0
+            has_rle_segmentation = annotation.get('rle', {}) != {}
             has_keypoints = len(annotation.get('keypoints', [])) > 0

-            if has_segmentation or has_keypoints:
+            if has_polygon_segmentation or has_keypoints or has_rle_segmentation:

                 if has_keypoints:
                     arr = np.array(annotation.get('keypoints', []))
                     arr = arr[2::3]
                     annotation['num_keypoints'] = len(arr[arr > 0])
-
+                if has_rle_segmentation:
+                    rle = annotation.get('rle')
+                    annotation['segmentation'] = rle
+                    annotation.pop('rle')
                 annotations.append(annotation)

         if len(category.get('keypoint_labels')) > 0:
diff --git a/backend/webserver/util/mask_rcnn.py b/backend/webserver/util/mask_rcnn.py
old mode 100644
new mode 100755
diff --git a/backend/workers/tasks/__init__.py b/backend/workers/tasks/__init__.py
old mode 100644
new mode 100755
index b00d7307..2660ac22
--- a/backend/workers/tasks/__init__.py
+++ b/backend/workers/tasks/__init__.py
@@ -1,5 +1,6 @@
 from .data import *
+from .semantic_segmentation import *
 from .test import *
 from .scan import *
 from .thumbnails import *
\ No newline at end of file
diff --git a/backend/workers/tasks/data.py b/backend/workers/tasks/data.py
old mode 100644
new mode 100755
index 0df11efe..b6570f6e
--- a/backend/workers/tasks/data.py
+++ b/backend/workers/tasks/data.py
@@ -19,9 +19,8 @@
 from ..socket import create_socket
 from mongoengine import Q

-
 @shared_task
-def export_annotations(task_id, dataset_id, categories):
+def export_annotations(task_id, dataset_id, categories, with_empty_images=False):

     task = TaskModel.objects.get(id=task_id)
     dataset = DatasetModel.objects.get(id=dataset_id)
@@ -52,6 +51,7 @@ def export_annotations(task_id, dataset_id, categories):

     # iterate though all categoires and upsert
     category_names = []
+
     for category in fix_ids(db_categories):

         if len(category.get('keypoint_labels', [])) > 0:
@@ -83,16 +83,18 @@ def export_annotations(task_id, dataset_id, categories):
         annotations = fix_ids(annotations)

         if len(annotations) == 0:
+            if with_empty_images:
+                coco.get('images').append(image)
             continue

         num_annotations = 0
         for annotation in annotations:
-
             has_keypoints = len(annotation.get('keypoints', [])) > 0
             has_segmentation = len(annotation.get('segmentation', [])) > 0
+            has_rle_segmentation = annotation.get('rle', {}) != {}

-            if has_keypoints or has_segmentation:
-
+            if has_keypoints or has_segmentation or has_rle_segmentation:
                 if not has_keypoints:
                     if 'keypoints' in annotation:
                         del annotation['keypoints']
@@ -101,8 +103,15 @@ def export_annotations(task_id, dataset_id, categories):
                     arr = arr[2::3]
                     annotation['num_keypoints'] = len(arr[arr > 0])

+                if has_rle_segmentation:
+                    rle = annotation.get('rle')
+                    annotation['segmentation'] = rle
+                    annotation.pop('rle')
+
                 num_annotations += 1
+
                 coco.get('annotations').append(annotation)
+
     task.info(
         f"Exporting {num_annotations} annotations for image {image.get('id')}")
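After the key swap above, an RLE-backed annotation leaves the exporter in COCO's crowd form: `segmentation` holds the `{'size', 'counts'}` dict and `iscrowd` is true. A hand-written sample of the resulting object (all values illustrative, `counts` truncated):

```python
annotation = {
    "id": 7,
    "image_id": 3,
    "category_id": 1,
    "iscrowd": True,
    "area": 5120,
    "bbox": [12, 40, 64, 80],
    "segmentation": {
        "size": [480, 640],     # [height, width] of the full image
        "counts": "b28e0d1..."  # compressed RLE string (truncated)
    },
}
```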
diff --git a/backend/workers/tasks/semantic_segmentation.py b/backend/workers/tasks/semantic_segmentation.py
new file mode 100755
index 00000000..c58a99de
--- /dev/null
+++ b/backend/workers/tasks/semantic_segmentation.py
@@ -0,0 +1,169 @@
+from database import (
+    fix_ids,
+    ImageModel,
+    CategoryModel,
+    AnnotationModel,
+    DatasetModel,
+    TaskModel,
+    ExportModel
+)
+import io
+import os
+import time
+import numpy as np
+import pycocotools.mask as mask
+import skimage.draw as sd
+
+import zipfile
+from PIL import Image, ImageColor
+from celery import shared_task
+from ..socket import create_socket
+
+@shared_task
+def export_semantic_segmentation(task_id, dataset_id, categories):
+    # Initiate a task and its socket
+    task = TaskModel.objects.get(id=task_id)
+    task.info("Beginning Export Semantic Segmentation")
+    task.update(status="PROGRESS")
+    socket = create_socket()
+
+    # Get the needed items from the database
+    dataset = DatasetModel.objects.get(id=dataset_id)
+    db_categories = CategoryModel.objects(id__in=categories, deleted=False) \
+        .only(*CategoryModel.COCO_PROPERTIES)
+    db_images = ImageModel.objects(
+        deleted=False, dataset_id=dataset.id).only(
+        *ImageModel.COCO_PROPERTIES)
+    db_annotations = AnnotationModel.objects(
+        deleted=False, category_id__in=categories)
+
+    # Iterate through all categories to get the color and name of each one
+    category_names = []
+    category_colors = [(0, 0, 0)]
+    for category in fix_ids(db_categories):
+        category_names.append(category.get('name'))
+        category_colors.append(ImageColor.getcolor(category.get('color'), 'RGB'))
+
+    # Build the export path and generate a unique name for the zip
+    timestamp = time.time()
+    directory = f"{dataset.directory}.exports/"
+    zip_path = f"{directory}SemanticSeg-{timestamp}.zip"
+
+    if not os.path.exists(directory):
+        os.makedirs(directory)
+
+    # Initiate progress counter
+    progress = 0
+    total_images = len(db_images)
+
+    with zipfile.ZipFile(zip_path, 'w', compression=zipfile.ZIP_DEFLATED) as zip_file:
+        # Iterate through each image and
+        # save its corresponding semantic segmentation
+        for image in db_images:
+            image = fix_ids(image)
+            width = image.get('width')
+            height = image.get('height')
+
+            img_annotations = db_annotations.filter(image_id=image.get('id'))\
+                .only(*AnnotationModel.COCO_PROPERTIES)
+
+            final_image_array = np.zeros((height, width))
+            category_index = 1
+            found_categories = []
+
+            for category in fix_ids(db_categories):
+                category_annotations = img_annotations\
+                    .filter(category_id=category.get('id'))\
+                    .only(*AnnotationModel.COCO_PROPERTIES)
+
+                if category_annotations.count() == 0:
+                    category_index += 1
+                    continue
+
+                found_categories.append(category_index)
+                category_annotations = fix_ids(category_annotations)
+
+                for annotation in category_annotations:
+                    has_polygon_segmentation = len(annotation.get('segmentation', [])) > 0
+                    has_rle_segmentation = annotation.get('rle', {}) != {}
+
+                    if has_rle_segmentation:
+                        # Extract the binary mask
+                        compressed_rle = annotation.get('rle')
+                        bin_mask = mask.decode(compressed_rle)
+                        idx = bin_mask == 1
+                        final_image_array[idx] = category_index
+                    elif has_polygon_segmentation:
+                        # Convert the polygons into a binary mask
+                        bin_mask = get_bin_mask(list(annotation.get('segmentation')), height, width)
+                        idx = bin_mask == 1
+                        final_image_array[idx] = category_index
+                category_index += 1
+
+            # Generate an RGB image to be saved in the zip
+            r = np.zeros_like(final_image_array).astype(np.uint8)
+            g = np.zeros_like(final_image_array).astype(np.uint8)
+            b = np.zeros_like(final_image_array).astype(np.uint8)
+            for l in found_categories:
+                idx = final_image_array == l
+                x, y, z = category_colors[l]
+                r[idx] = x
+                g[idx] = y
+                b[idx] = z
+            rgb = np.stack([r, g, b], axis=2)
+            image_io = io.BytesIO()
+            Image.fromarray(rgb.astype('uint8')).save(image_io, "PNG", quality=95)
+
+            # Write the image to the zip
+            task.info(f"Writing image {image.get('id')} to the zipfile")
+            zip_file.writestr(dataset.name + "/" + image.get('file_name'), image_io.getvalue())
+
+            # Update progress
+            progress += 1
+            task.set_progress((progress / total_images) * 100, socket=socket)
+
+    task.info("Finished generating the segmentation images... Saving the zipfile")
+
+    export = ExportModel(dataset_id=dataset.id, path=zip_path,
+                         tags=["SemanticSeg", *category_names])
+    export.save()
+
+def get_bin_mask(segmentation, image_height, image_width):
+    """
+    Computes the binary mask of an annotation in polygon format.
+    It separates out segmentations in line and point format (they are not supported by pycocotools).
+    (Duplicate of the helper in webserver/util/coco_util, which cannot be imported from this package.)
+    :return: the binary mask as an np.array
+    """
+    bin_mask = np.zeros((image_height, image_width))
+    points = []
+    lines = []
+    polygons = []
+
+    for segment in segmentation:
+        if len(segment) == 2:
+            points.append(segment)
+        elif len(segment) == 4:
+            lines.append(segment)
+        else:
+            polygons.append(segment)
+
+    if len(polygons) != 0:
+        # Convert into rle
+        rles = mask.frPyObjects(polygons, image_height, image_width)
+        rle = mask.merge(rles)
+        # Extract the binary mask
+        bin_mask = mask.decode(rle)
+
+    for point in points:
+        bin_mask[round(point[1])][round(point[0])] = 1
+
+    for line in lines:
+        rr, cc = sd.line(round(line[0]), round(line[1]), round(line[2]), round(line[3]))
+        bin_mask[cc, rr] = 1
+
+    return bin_mask
+
+__all__ = ["export_semantic_segmentation"]
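One behavioral detail of the mask-building loop worth flagging: categories are painted in `db_categories` order, so where annotations overlap, the later category index overwrites the earlier one. A minimal numpy demonstration:

```python
import numpy as np

final_image_array = np.zeros((4, 4))

mask_cat1 = np.zeros((4, 4), dtype=bool)
mask_cat1[0:3, 0:3] = True
mask_cat2 = np.zeros((4, 4), dtype=bool)
mask_cat2[1:4, 1:4] = True

final_image_array[mask_cat1] = 1  # category index 1
final_image_array[mask_cat2] = 2  # category index 2 wins on the overlap

print(final_image_array)  # the 2x2 overlap region now holds 2, not 1
```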
diff --git a/client/src/components/annotator/Annotation.vue b/client/src/components/annotator/Annotation.vue
index 1921c202..95afac21 100755
--- a/client/src/components/annotator/Annotation.vue
+++ b/client/src/components/annotator/Annotation.vue
@@ -1,8 +1,5 @@
diff --git a/client/src/components/annotator/panels/PolygonPanel.vue b/client/src/components/annotator/panels/PolygonPanel.vue
index 951d8761..47b1a3e2 100755
--- a/client/src/components/annotator/panels/PolygonPanel.vue
+++ b/client/src/components/annotator/panels/PolygonPanel.vue
@@ -27,6 +27,13 @@
         step="2"
         v-model="polygon.polygon.minDistance"
       />
+
diff --git a/client/src/components/annotator/tools/BrushTool.vue b/client/src/components/annotator/tools/BrushTool.vue
index 5270e1c4..b9b72c68 100755
--- a/client/src/components/annotator/tools/BrushTool.vue
+++ b/client/src/components/annotator/tools/BrushTool.vue
@@ -3,7 +3,7 @@
 import paper from "paper";
 import tool from "@/mixins/toolBar/tool";

 export default {
-  name: "EraserTool",
+  name: "BrushTool",
   mixins: [tool],
   props: {
     scale: {
@@ -19,6 +19,7 @@ export default {
       scaleFactor: 3,
       brush: {
         path: null,
+        simplify: 0,
         pathOptions: {
           strokeColor: "white",
           strokeWidth: 1,
@@ -56,6 +57,7 @@ export default {
         radius: this.brush.pathOptions.radius,
         center: center
       });
+
     },
     createSelection() {
       this.selection = new paper.Path({
@@ -92,6 +94,8 @@ export default {
      * Unites current selection with selected annotation
      */
     merge() {
+      this.selection["segmentsType"] = 'pixel';
+      this.selection["simplifyDegree"] = this.brush.simplify;
       this.$parent.uniteCurrentAnnotation(this.selection);
     },
     decreaseRadius() {
@@ -105,7 +109,8 @@ export default {
     export() {
       return {
         strokeColor: this.brush.pathOptions.strokeColor,
-        radius: this.brush.pathOptions.radius
+        radius: this.brush.pathOptions.radius,
+        simplify: this.brush.simplify
       };
     },
     setPreferences(pref) {
@@ -133,6 +138,9 @@ export default {
       this.brush.path.strokeColor = newColor;
     },
+    "brush.simplify"(newDegree) {
+      this.simplify = newDegree;
+    },
     isActive(active) {
       if (this.brush.path != null) {
         this.brush.path.visible = active;
diff --git a/client/src/components/annotator/tools/DownloadSemanticSegButton.vue b/client/src/components/annotator/tools/DownloadSemanticSegButton.vue
new file mode 100755
index 00000000..2aeaff5d
--- /dev/null
+++ b/client/src/components/annotator/tools/DownloadSemanticSegButton.vue
@@ -0,0 +1,36 @@
+
diff --git a/client/src/components/annotator/tools/EraserTool.vue b/client/src/components/annotator/tools/EraserTool.vue
index 8be6591b..e16ef175 100755
--- a/client/src/components/annotator/tools/EraserTool.vue
+++ b/client/src/components/annotator/tools/EraserTool.vue
@@ -25,7 +25,8 @@ export default {
           strokeWidth: 1,
           radius: 30
         }
-      }
+      },
+      selection: null
     };
   },
   methods: {
@@ -35,15 +36,19 @@ export default {
         this.eraser.brush = null;
       }
     },
+    removeSelection() {
+      if (this.selection != null) {
+        this.selection.remove();
+        this.selection = null;
+      }
+    },
     moveBrush(point) {
       if (this.eraser.brush == null) this.createBrush();
-
       this.eraser.brush.bringToFront();
       this.eraser.brush.position = point;
     },
     createBrush(center) {
       center = center || new paper.Point(0, 0);
-
       this.eraser.brush = new paper.Path.Circle({
         strokeColor: this.eraser.pathOptions.strokeColor,
         strokeWidth: this.eraser.pathOptions.strokeWidth,
@@ -51,6 +56,12 @@ export default {
         center: center
       });
     },
+    createSelection() {
+      this.selection = new paper.Path({
+        strokeColor: this.eraser.pathOptions.strokeColor,
+        strokeWidth: this.eraser.pathOptions.strokeWidth
+      });
+    },
     onMouseMove(event) {
       this.moveBrush(event.point);
     },
@@ -59,16 +70,20 @@ export default {
       this.erase();
     },
     onMouseDown() {
-      this.$parent.currentAnnotation.createUndoAction("Subtract");
+      this.createSelection();
       this.erase();
     },
     onMouseUp() {
-      this.$parent.currentAnnotation.simplifyPath();
+      this.$parent.currentAnnotation.createUndoAction("Subtract");
+      this.$parent.currentAnnotation.subtract(this.selection, true, false);
+      this.removeSelection();
     },
     erase() {
-      // Undo action, will be handled on mouse down
-      // Simplify, will be handled on mouse up
-      this.$parent.currentAnnotation.subtract(this.eraser.brush, false, false);
+      // The undo action is handled on mouse up
+      let newSelection = this.selection.unite(this.eraser.brush);
+
+      this.selection.remove();
+      this.selection = newSelection;
     },
     decreaseRadius() {
       if (!this.isActive) return;
@@ -99,21 +114,18 @@ export default {
   watch: {
     "eraser.pathOptions.radius"() {
       if (this.eraser.brush == null) return;
-
       let position = this.eraser.brush.position;
       this.eraser.brush.remove();
       this.createBrush(position);
     },
     "eraser.pathOptions.strokeColor"(newColor) {
       if (this.eraser.brush == null) return;
-
       this.eraser.brush.strokeColor = newColor;
     },
     isActive(active) {
       if (this.eraser.brush != null) {
         this.eraser.brush.visible = active;
       }
-
       if (active) {
         this.tool.activate();
         localStorage.setItem("editorTool", this.name);
@@ -130,4 +142,4 @@ export default {
   },
   mounted() {}
 };
-</script>
+</script>
\ No newline at end of file
diff --git a/client/src/components/annotator/tools/PolygonTool.vue b/client/src/components/annotator/tools/PolygonTool.vue
index 15cf7e6d..0f493e55 100755
--- a/client/src/components/annotator/tools/PolygonTool.vue
+++ b/client/src/components/annotator/tools/PolygonTool.vue
@@ -54,6 +54,7 @@ export default {
     ...mapMutations(["addUndo", "removeUndos"]),
     export() {
       return {
+        simplify: this.polygon.simplify,
         guidance: this.polygon.guidance,
         completeDistance: this.polygon.completeDistance,
         minDistance: this.polygon.minDistance,
@@ -82,6 +83,8 @@ export default {
         );
       }
       this.polygon.path = new paper.Path(this.polygon.pathOptions);
+      this.polygon.path["segmentsType"] = 'polygon';
+      this.polygon.path["simplifyDegree"] = this.simplify;
     },
     /**
      * Frees current polygon
@@ -198,7 +201,6 @@ export default {
         this.polygon.path.fillColor = "black";
         this.polygon.path.closePath();
-
         this.$parent.uniteCurrentAnnotation(this.polygon.path);

         this.polygon.path.remove();
@@ -209,7 +211,7 @@ export default {
       }

       this.removeUndos(this.actionTypes.ADD_POINTS);
-      this.$parent.save();
+      // this.$parent.save();
       return true;
     },
     removeLastPoint() {
@@ -249,6 +251,10 @@ export default {
     "polygon.minDistance"(newDistance) {
       this.tool.minDistance = newDistance;
     },
+    "polygon.simplify"(newDegree) {
+      this.tool.simplify = newDegree;
+      this.simplify = newDegree;
+    },
     "polygon.pathOptions.strokeColor"(newColor) {
       if (this.polygon.path == null) return;
diff --git a/client/src/mixins/toolBar/tool.js b/client/src/mixins/toolBar/tool.js
index f845ea09..20c83472 100755
--- a/client/src/mixins/toolBar/tool.js
+++ b/client/src/mixins/toolBar/tool.js
@@ -64,6 +64,9 @@
       if (this.isDisabled) {
         return this.name + " (select an annotation to activate tool)";
       }
+      if (this.name == "Brush") {
+        return this.name + " Tool" + " (for RLE segmentation format)";
+      }
       return this.name + " Tool";
     }
   },
diff --git a/client/src/models/datasets.js b/client/src/models/datasets.js
old mode 100644
new mode 100755
index 4cc06a7f..59bdecce
--- a/client/src/models/datasets.js
+++ b/client/src/models/datasets.js
@@ -30,8 +30,11 @@ export default {
   scan(id) {
     return axios.get(`${baseURL}/${id}/scan`);
   },
-  exportingCOCO(id, categories) {
-    return axios.get(`${baseURL}/${id}/export?categories=${categories}`);
+  exportingCOCO(id, categories, with_empty_images) {
+    return axios.get(`${baseURL}/${id}/export?categories=${categories}&with_empty_images=${with_empty_images}`);
+  },
+  exportingSemanticSegmentation(id, categories) {
+    return axios.get(`${baseURL}/${id}/semanticSegmentation?categories=${categories}`);
   },
   getCoco(id) {
     return axios.get(`${baseURL}/${id}/coco`);
diff --git a/client/src/models/exports.js b/client/src/models/exports.js
old mode 100644
new mode 100755
index 9f55fa6c..0ff0c7e8
--- a/client/src/models/exports.js
+++ b/client/src/models/exports.js
@@ -11,8 +11,8 @@ export default {
     }).then(response => {
       const url = window.URL.createObjectURL(new Blob([response.data]));
       const link = document.createElement("a");
-      link.href = url;
-      link.setAttribute("download", `${dataset}-${id}.json`);
+      link.href = `${baseURL}/${id}/download`; // download directly instead of the blob url
+      link.download = `${dataset}-${id}`;
       document.body.appendChild(link);
       link.click();
     });
diff --git a/client/src/views/Annotator.vue b/client/src/views/Annotator.vue
index fbc2e6b8..7d5feab0 100755
--- a/client/src/views/Annotator.vue
+++ b/client/src/views/Annotator.vue
@@ -80,7 +80,9 @@
-
+
@@ -245,6 +247,7 @@
 import DEXTRTool from "@/components/annotator/tools/DEXTRTool";
 import CopyAnnotationsButton from "@/components/annotator/tools/CopyAnnotationsButton";
 import CenterButton from "@/components/annotator/tools/CenterButton";
 import DownloadButton from "@/components/annotator/tools/DownloadButton";
+import DownloadSemanticSegButton from "@/components/annotator/tools/DownloadSemanticSegButton";
 import SaveButton from "@/components/annotator/tools/SaveButton";
 import SettingsButton from "@/components/annotator/tools/SettingsButton";
 import ModeButton from "@/components/annotator/tools/ModeButton";
@@ -282,6 +285,7 @@ export default {
     BrushTool,
     KeypointTool,
     DownloadButton,
+    DownloadSemanticSegButton,
     SaveButton,
     SettingsButton,
     DeleteButton,
@@ -673,11 +677,9 @@ export default {
       if (this.currentCategory == null) return;
       this.currentAnnotation.subtract(compound, simplify, undoable);
     },
-
     selectLastEditorTool() {
       this.activeTool = localStorage.getItem("editorTool") || "Select";
     },
-
     setCursor(newCursor) {
       this.cursor = newCursor;
     },
diff --git a/client/src/views/Dataset.vue b/client/src/views/Dataset.vue
index e4d11db7..5001e911 100755
--- a/client/src/views/Dataset.vue
+++ b/client/src/views/Dataset.vue
@@ -1,32 +1,54 @@