New features (Export annotations in RLE, Visualize semantic segmentation ...) #508

Open
wants to merge 31 commits into master

31 commits
5397392
polygonTool and brushTool use the same simplyPath, with different seg…
Chaimaa-Louahabi Jul 9, 2021
1933a34
binary mask visualisation
Chaimaa-Louahabi Jul 21, 2021
0dba42b
1st stable version, polygon segmentation format
Chaimaa-Louahabi Aug 4, 2021
66d1ab8
refactor unite to keep isolated points and lines
Chaimaa-Louahabi Aug 5, 2021
e66efa9
refactor paperjs_to_coco to keep isolated points and lines
Chaimaa-Louahabi Aug 5, 2021
697214b
refactor subtract to handle isolated points and lines
Chaimaa-Louahabi Aug 5, 2021
b58c221
Generate binary mask for lines and isolated pixels
Chaimaa-Louahabi Aug 6, 2021
3123023
Use pixelMode
Chaimaa-Louahabi Aug 20, 2021
7e27144
Add a task for Generating a dataset's semantic segmentation
Chaimaa-Louahabi Aug 31, 2021
1351635
Add export semantic segmentation button and its handlers
Chaimaa-Louahabi Aug 31, 2021
dae748b
Send rasterized compoundPath to the backend
Chaimaa-Louahabi Aug 31, 2021
2568279
Change file name, change URI, Send image id only
Chaimaa-Louahabi Aug 31, 2021
1569559
Edit attachment file name
Chaimaa-Louahabi Aug 31, 2021
46c88a3
Send Image's semantic segmentation as attachment
Chaimaa-Louahabi Aug 31, 2021
185c92e
Handle "export semantic segmentation" request from dataset
Chaimaa-Louahabi Aug 31, 2021
2efb0a8
Handle export semantic segmentation request
Chaimaa-Louahabi Aug 31, 2021
d619cf4
Encode binary mask to RLE
Chaimaa-Louahabi Aug 31, 2021
16eaff3
Keep category colors chosen by the user
Chaimaa-Louahabi Aug 31, 2021
17a1173
Delete empty annotation
Chaimaa-Louahabi Sep 1, 2021
d32b52d
Fix color choice
Chaimaa-Louahabi Sep 1, 2021
3e48006
RLE in database in compressed format
Chaimaa-Louahabi Sep 1, 2021
79202cf
Stable version for lines and polygons combined
Chaimaa-Louahabi Jun 24, 2021
657e6ec
add an inputNumber panel for simplification degree
Chaimaa-Louahabi Jul 5, 2021
475bba1
Delete comments
Chaimaa-Louahabi Sep 21, 2021
08bab7f
Improve UI experience (#520)
felixdollack Feb 1, 2022
2ed7355
Fix Datasets Page never display Done status (#525)
geminixiang Feb 1, 2022
11a4838
Optimize Annotator API Query (#535)
geminixiang Feb 1, 2022
351fbff
Added an Annotations per User section to the stats section. (#538)
Shadercloud Feb 1, 2022
9cce5d2
enable exporting un-annotated img (#449)
d-wei-of-sensyn Feb 1, 2022
44dad57
Fixing merge conflicts
Chaimaa-Louahabi Apr 10, 2022
ad5ba31
Reset file
Chaimaa-Louahabi Nov 4, 2023
Empty file modified README.md
100644 → 100755
Empty file.
12 changes: 5 additions & 7 deletions backend/database/annotations.py
100644 → 100755
@@ -1,8 +1,6 @@
import imantics as im
import json

from mongoengine import *

from .datasets import DatasetModel
from .categories import CategoryModel
from .events import Event
@@ -11,7 +9,7 @@

class AnnotationModel(DynamicDocument):

COCO_PROPERTIES = ["id", "image_id", "category_id", "segmentation",
COCO_PROPERTIES = ["id", "image_id", "category_id", "segmentation", "rle",
"iscrowd", "color", "area", "bbox", "metadata",
"keypoints", "isbbox"]

@@ -20,12 +18,13 @@ class AnnotationModel(DynamicDocument):
category_id = IntField(required=True)
dataset_id = IntField()

segmentation = ListField(default=[])
segmentation = ListField(default=[]) #segmentation in polygon format
rle = DictField(default={}) #segmentation in RLE format (Compressed RLE)
area = IntField(default=0)
bbox = ListField(default=[0, 0, 0, 0])
iscrowd = BooleanField(default=False)
isbbox = BooleanField(default=False)

creator = StringField(required=True)
width = IntField()
height = IntField()
@@ -59,7 +58,6 @@ def __init__(self, image_id=None, **data):
super(AnnotationModel, self).__init__(**data)

def save(self, copy=False, *args, **kwargs):

if self.dataset_id and not copy:
dataset = DatasetModel.objects(id=self.dataset_id).first()

@@ -88,7 +86,7 @@ def mask(self):
]
mask = cv2.fillPoly(mask, pts, 1)
return mask

def clone(self):
""" Creates a clone """
create = json.loads(self.to_json())
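For context (not part of the diff): the new rle field is meant to hold a compressed COCO RLE dictionary as produced by pycocotools, with the counts value decoded to a plain string so it can be stored in MongoDB. A minimal sketch of round-tripping such a value, assuming pycocotools and numpy are installed:

# Illustrative sketch: encode a binary mask to compressed RLE and decode it back.
import numpy as np
import pycocotools.mask as mask_util

binary_mask = np.zeros((4, 6), dtype=np.uint8)
binary_mask[1:3, 2:5] = 1

rle = mask_util.encode(np.asfortranarray(binary_mask))  # {'size': [4, 6], 'counts': b'...'}
rle['counts'] = rle['counts'].decode()                  # stored as str, as this PR does

decoded = mask_util.decode({'size': rle['size'], 'counts': rle['counts'].encode()})
assert (decoded == binary_mask).all()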
22 changes: 19 additions & 3 deletions backend/database/datasets.py
100644 → 100755
@@ -64,7 +64,7 @@ def import_coco(self, coco_json):
"name": task.name
}

def export_coco(self, categories=None, style="COCO"):
def export_coco(self, categories=None, style="COCO", with_empty_images=False):

from workers.tasks import export_annotations

@@ -78,14 +78,30 @@ def export_coco(self, categories=None, style="COCO"):
)
task.save()

cel_task = export_annotations.delay(task.id, self.id, categories)
cel_task = export_annotations.delay(task.id, self.id, categories, with_empty_images)

return {
"celery_id": cel_task.id,
"id": task.id,
"name": task.name
}

def export_semantic_segmentation(self, categories=None) :
from workers.tasks import export_semantic_segmentation
if categories is None or len(categories) == 0:
categories = self.categories
task = TaskModel(
name=f"Exporting semantic segmentation of {self.name} ",
dataset_id=self.id,
group="Semantic Segmentation Export"
)
task.save()
cel_task = export_semantic_segmentation.delay(task.id, self.id, categories)
return {
"celery_id": cel_task.id,
"id": task.id,
"name": task.name
}

def scan(self):

from workers.tasks import scan_dataset
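For context (not part of the diff): export_semantic_segmentation mirrors export_coco; it records a TaskModel and hands the work to a Celery worker, returning identifiers the frontend can poll. A hypothetical usage sketch, assuming it runs inside the backend where DatasetModel is importable:

# Hypothetical usage of the new method; run from the backend's Python shell.
from database import DatasetModel

dataset = DatasetModel.objects(id=1).first()   # dataset id 1 is an assumption

# Passing None or an empty list falls back to every category in the dataset.
result = dataset.export_semantic_segmentation(categories=[2, 5])
print(result)  # {'celery_id': ..., 'id': ..., 'name': 'Exporting semantic segmentation of ...'}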
Empty file modified backend/webserver/api/annotations.py
100644 → 100755
Empty file.
90 changes: 66 additions & 24 deletions backend/webserver/api/annotator.py
100644 → 100755
@@ -1,8 +1,13 @@
import datetime
import base64
import io
import pycocotools.mask as mask
import numpy as np

from flask_restplus import Namespace, Resource
from flask_login import login_required, current_user
from flask import request
from PIL import Image

from ..util import query_util, coco_util, profile, thumbnails

@@ -16,7 +21,6 @@

api = Namespace('annotator', description='Annotator related operations')


@api.route('/data')
class AnnotatorData(Resource):

@@ -48,7 +52,6 @@ def post(self):

current_user.update(preferences=data.get('user', {}))

annotated = False
num_annotations = 0
# Iterate every category passed in the data
for category in data.get('categories', []):
@@ -108,25 +111,63 @@ def post(self):
)

paperjs_object = annotation.get('compoundPath', [])

# Update paperjs if it exists
area = 0
bbox = []
width = db_annotation.width
height = db_annotation.height
if len(paperjs_object) == 2:

width = db_annotation.width
height = db_annotation.height

# Generate coco formatted segmentation data
segmentation, area, bbox = coco_util.\
paperjs_to_coco(width, height, paperjs_object)
# Store segmentation in compressed RLE format
if (annotation.get('raster', {}) != {}) :
area = annotation.get('area', 0)
bbox = annotation.get('bbox')
ann_x = int(bbox[0])
ann_y = int(bbox[1])
ann_height = int(bbox[2])
ann_width = int(bbox[3])
dataurl = annotation.get('raster')

# Convert base64 image to RGB image
image_b64 = dataurl.split(",")[1]
binary = io.BytesIO(base64.b64decode(image_b64))
sub_image = Image.open(binary)
sub_image = np.array(sub_image).reshape((ann_height, ann_width, 4))

# Convert the RGB image to a binary image (each pixel is either 0 or 1)
sub_binary_mask = np.sum(sub_image[:, :, :3], 2)
sub_binary_mask[sub_binary_mask>0] = 1

# Insert the sub binary mask into its position in the full image
full_binary_mask = np.zeros((height,width), np.uint8)
# Handle annotations exceeding image borders
y_0 = ann_y
y_end = ann_y+ann_height
x_0 = ann_x
x_end = ann_x+ann_width
full_binary_mask[y_0 : y_end, x_0 : x_end] = sub_binary_mask
rle = mask.encode(np.asfortranarray(full_binary_mask.astype('uint8')))
# Convert rle['counts'] from bytes to a string
rle['counts'] = rle.get('counts').decode()
db_annotation.update(
set__rle = rle,
set__iscrowd = True,
set__segmentation= [] #Clear segmentation when moving from polygon format to rle
)
# Store segmentation in polygon format
else :
segmentation, area, bbox = coco_util.\
paperjs_to_coco(width, height, paperjs_object)

db_annotation.update(
set__segmentation=segmentation
)

db_annotation.update(
set__segmentation=segmentation,
set__area=area,
set__isbbox=annotation.get('isbbox', False),
set__bbox=bbox,
set__paper_object=paperjs_object,
set__area = area,
set__isbbox = annotation.get('isbbox', False),
set__bbox = bbox,
set__paper_object = paperjs_object
)

if area > 0:
counted = True

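For context (not part of the diff): the branch above converts the brush raster sent by the frontend into a full-size binary mask and then into compressed RLE. A self-contained sketch of that pipeline, with a tiny synthetic RGBA patch standing in for the data URL (all sizes and names here are illustrative):

# Illustrative, standalone sketch of the raster-to-RLE path added above (not part of the PR).
import base64, io
import numpy as np
import pycocotools.mask as mask
from PIL import Image

width, height = 64, 48                               # full image size
ann_x, ann_y, ann_width, ann_height = 10, 5, 16, 8   # placement of the painted patch

# Build a small RGBA patch and wrap it in a data URL, as the frontend would.
patch = np.zeros((ann_height, ann_width, 4), dtype=np.uint8)
patch[2:6, 3:12, :3] = 255                           # some painted pixels
buf = io.BytesIO()
Image.fromarray(patch, mode="RGBA").save(buf, format="PNG")
dataurl = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()

# Same steps as the handler: decode, binarize, paste into the full image, encode.
image_b64 = dataurl.split(",")[1]
sub_image = np.array(Image.open(io.BytesIO(base64.b64decode(image_b64))))
sub_binary_mask = (sub_image[:, :, :3].sum(axis=2) > 0).astype(np.uint8)

full_binary_mask = np.zeros((height, width), np.uint8)
full_binary_mask[ann_y:ann_y + ann_height, ann_x:ann_x + ann_width] = sub_binary_mask

rle = mask.encode(np.asfortranarray(full_binary_mask))
rle['counts'] = rle['counts'].decode()               # JSON/Mongo-friendly string
print(rle['size'], rle['counts'][:20])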
@@ -142,10 +183,8 @@ def post(self):
)

thumbnails.generate_thumbnail(image_model)

return {"success": True}


@api.route('/data/<int:image_id>')
class AnnotatorId(Resource):

@@ -190,18 +229,21 @@ def get(self, image_id):
data['image']['previous'] = pre.id if pre else None
data['image']['next'] = nex.id if nex else None

# Optimize query: fetch all annotations of the image once, then group them by category.
all_annotations = AnnotationModel.objects(image_id=image_id, deleted=False).exclude('events').all()

for category in categories:
category = query_util.fix_ids(category[1])

category_id = category.get('id')
annotations = AnnotationModel.objects(image_id=image_id, category_id=category_id, deleted=False)\
.exclude('events').all()

annotations = []
for annotation in all_annotations:
if annotation['category_id'] == category_id:
annotations.append(query_util.fix_ids(annotation))

category['show'] = True
category['visualize'] = False
category['annotations'] = [] if annotations is None else query_util.fix_ids(annotations)
category['annotations'] = [] if annotations is None else annotations
data.get('categories').append(category)

return data


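For context (not part of the diff): the second change in this file replaces one database query per category with a single query followed by in-memory filtering. A hypothetical variant that groups the fetched annotations in one pass (not what the PR does, just a sketch of the same idea):

# Hypothetical one-pass grouping of already-fetched annotations by category_id.
from collections import defaultdict

def group_annotations_by_category(all_annotations):
    grouped = defaultdict(list)
    for annotation in all_annotations:
        grouped[annotation['category_id']].append(annotation)
    return grouped

# Plain dicts stand in for AnnotationModel documents here.
print(group_annotations_by_category([
    {'id': 1, 'category_id': 2},
    {'id': 2, 'category_id': 2},
    {'id': 3, 'category_id': 5},
]))
# defaultdict(<class 'list'>, {2: [...], 5: [...]})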
48 changes: 45 additions & 3 deletions backend/webserver/api/datasets.py
100644 → 100755
@@ -1,5 +1,5 @@
from flask import request
from flask_restplus import Namespace, Resource, reqparse
from flask_restplus import Namespace, Resource, reqparse, inputs
from flask_login import login_required, current_user
from werkzeug.datastructures import FileStorage
from mongoengine.errors import NotUniqueError
@@ -46,6 +46,7 @@

export = reqparse.RequestParser()
export.add_argument('categories', type=str, default=None, required=False, help='Ids of categories to export')
export.add_argument('with_empty_images', type=inputs.boolean, default=False, required=False, help='Export with un-annotated images')

update_dataset = reqparse.RequestParser()
update_dataset.add_argument('categories', location='json', type=list, help="New list of categories")
@@ -179,6 +180,23 @@ def get(self, dataset_id):
# Calculate annotation counts by category in this dataset
category_count = dict()
image_category_count = dict()



user_stats = dict()

for user in dataset.get_users():
user_annots = AnnotationModel.objects(dataset_id=dataset_id, deleted=False, creator=user.username)
image_count = dict()
for annot in user_annots:
image_count[annot.image_id] = image_count.get(annot.image_id, 0) + 1

user_stats[user.username] = {
"annotations": len(user_annots),
"images": len(image_count)
}


for category in dataset.categories:

# Calculate the annotation count in the current category in this dataset
@@ -207,7 +225,8 @@ def get(self, dataset_id):
'Time (ms) per Annotation': annotations.average('milliseconds') or 0
},
'categories': category_count,
'images_per_category': image_category_count
'images_per_category': image_category_count,
'users': user_stats
}
return stats

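For context (not part of the diff): the new users entry maps each username to its annotation and image counts. A hypothetical fragment of the extended stats payload (values invented for illustration):

# Hypothetical shape of the new 'users' entry in the stats response.
user_stats = {
    'alice': {'annotations': 250, 'images': 48},
    'bob':   {'annotations': 180, 'images': 39},
}
# Merged into the response above as:  'users': user_stats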
@@ -497,6 +516,7 @@ def get(self, dataset_id):

args = export.parse_args()
categories = args.get('categories')
with_empty_images = args.get('with_empty_images', False)

if len(categories) == 0:
categories = []
@@ -509,7 +529,7 @@ def get(self, dataset_id):
if not dataset:
return {'message': 'Invalid dataset ID'}, 400

return dataset.export_coco(categories=categories)
return dataset.export_coco(categories=categories, with_empty_images=with_empty_images)

@api.expect(coco_upload)
@login_required
@@ -524,6 +544,28 @@ def post(self, dataset_id):

return dataset.import_coco(json.load(coco))

@api.route('/<int:dataset_id>/semanticSegmentation')
class DatasetSemanticSegmentation(Resource):

@api.expect(export)
@login_required
def get(self, dataset_id):

args = export.parse_args()
categories = args.get('categories')

if len(categories) == 0:
categories = []

if len(categories) > 0 or isinstance(categories, str):
categories = [int(c) for c in categories.split(',')]

dataset = DatasetModel.objects(id=dataset_id).first()

if not dataset:
return {'message': 'Invalid dataset ID'}, 400

return dataset.export_semantic_segmentation(categories=categories)

@api.route('/<int:dataset_id>/coco')
class DatasetCoco(Resource):
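For context (not part of the diff): the dataset API now accepts with_empty_images on the COCO export and exposes a new semanticSegmentation route. A hypothetical client sketch; the base URL, route prefixes, and session cookie are assumptions, and both routes require login:

# Hypothetical client calls; base URL and route prefixes are assumptions.
import requests

cookies = {"session": "<your session cookie>"}
base = "http://localhost:5000/api/dataset/1"

# COCO export, now optionally including un-annotated images.
coco_task = requests.get(f"{base}/export",
                         params={"categories": "2,5", "with_empty_images": "true"},
                         cookies=cookies).json()

# New: start a semantic segmentation export for the same dataset.
seg_task = requests.get(f"{base}/semanticSegmentation",
                        params={"categories": "2,5"},
                        cookies=cookies).json()

print(coco_task, seg_task)   # each: {'celery_id': ..., 'id': ..., 'name': ...}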
8 changes: 5 additions & 3 deletions backend/webserver/api/exports.py
100644 → 100755
@@ -1,13 +1,13 @@
from flask import send_file
from flask_restplus import Namespace, Resource, reqparse
from flask_restplus import Namespace, Resource
from flask_login import login_required, current_user

import datetime

from ..util import query_util

from database import (
ExportModel,
DatasetModel,
fix_ids
)

@@ -66,6 +66,8 @@ def get(self, export_id):

if not current_user.can_download(dataset):
return {"message": "You do not have permission to download the dataset's annotations"}, 403
if len(list(export.tags)) >0 and list(export.tags)[0] == "SemanticSeg":
return send_file(export.path, attachment_filename=f"{dataset.name}-{'-'.join(export.tags)}.zip", as_attachment=True)

return send_file(export.path, attachment_filename=f"{dataset.name.encode('utf-8')}-{'-'.join(export.tags).encode('utf-8')}.json", as_attachment=True)
return send_file(export.path, attachment_filename=f"{dataset.name}-{'-'.join(export.tags)}.json", as_attachment=True)
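For context (not part of the diff): with the branch above, a downloaded export is a .zip for semantic segmentation exports and a .json otherwise, so a client should not assume the extension. A hypothetical download helper (the endpoint path and cookie are assumptions):

# Hypothetical download helper; the endpoint path is an assumption.
import requests

resp = requests.get("http://localhost:5000/api/export/7/download",
                    cookies={"session": "<your session cookie>"})
is_zip = resp.headers.get("Content-Type", "").endswith("zip")
with open("export.zip" if is_zip else "export.json", "wb") as f:
    f.write(resp.content)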