Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions labelbox/data/annotation_types/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@
from .annotation import VideoClassificationAnnotation
from .annotation import ObjectAnnotation
from .annotation import VideoObjectAnnotation
from .annotation import DICOMObjectAnnotation
from .annotation import GroupKey

from .ner import ConversationEntity
from .ner import DocumentEntity
Expand Down
41 changes: 40 additions & 1 deletion labelbox/data/annotation_types/annotation.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import abc
from enum import Enum
from typing import Any, Dict, List, Optional, Union

from labelbox.data.mixins import ConfidenceNotSupportedMixin, ConfidenceMixin
Expand Down Expand Up @@ -66,7 +67,7 @@ class VideoObjectAnnotation(ObjectAnnotation, ConfidenceNotSupportedMixin):
>>> end=Point(x=1, y=1)
>>> ),
>>> feature_schema_id="my-feature-schema-id"
>>>)
>>> )

Args:
name (Optional[str])
Expand Down Expand Up @@ -97,3 +98,41 @@ class VideoClassificationAnnotation(ClassificationAnnotation):
"""
frame: int
segment_index: Optional[int] = None


class GroupKey(Enum):
    """Group key for DICOM annotations.

    Identifies which anatomical imaging plane a DICOM annotation
    belongs to; serialized as the lowercase string value.
    """
    AXIAL = "axial"
    SAGITTAL = "sagittal"
    CORONAL = "coronal"


class DICOMObjectAnnotation(VideoObjectAnnotation):
    """DICOM object annotation: a video-style object annotation extended
    with the anatomical plane (group key) it was drawn on.

    >>> DICOMObjectAnnotation(
    >>>     name="dicom_polyline",
    >>>     frame=2,
    >>>     value=lb_types.Line(points = [
    >>>         lb_types.Point(x=680, y=100),
    >>>         lb_types.Point(x=100, y=190),
    >>>         lb_types.Point(x=190, y=220)
    >>>     ]),
    >>>     segment_index=0,
    >>>     keyframe=True,
    >>>     group_key=GroupKey.AXIAL
    >>> )

    Args:
        name (Optional[str])
        feature_schema_id (Optional[Cuid])
        value (Geometry)
        group_key (GroupKey): Anatomical plane (axial/sagittal/coronal) this annotation belongs to
        frame (Int): The frame index that this annotation corresponds to
        keyframe (bool): Whether this annotation was human generated (True) or interpolated (False)
        segment_index (Optional[Int]): Index of the video segment this annotation belongs to
        classifications (List[ClassificationAnnotation]) = []
        extra (Dict[str, Any])
    """
    # Anatomical plane this annotation was drawn on.
    group_key: GroupKey
9 changes: 4 additions & 5 deletions labelbox/data/annotation_types/label.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,10 @@
from labelbox.data.annotation_types.data.tiled_image import TiledImageData
from labelbox.schema import ontology
from .annotation import (ClassificationAnnotation, ObjectAnnotation,
VideoClassificationAnnotation, VideoObjectAnnotation)
VideoClassificationAnnotation, VideoObjectAnnotation,
DICOMObjectAnnotation)
from .classification import ClassificationAnswer
from .data import VideoData, TextData, ImageData
from .data import DicomData, VideoData, TextData, ImageData
from .geometry import Mask
from .metrics import ScalarMetric, ConfusionMatrixMetric
from .types import Cuid
Expand Down Expand Up @@ -39,9 +40,7 @@ class Label(BaseModel):
uid: Optional[Cuid] = None
data: Union[VideoData, ImageData, TextData, TiledImageData]
annotations: List[Union[ClassificationAnnotation, ObjectAnnotation,
VideoObjectAnnotation,
VideoClassificationAnnotation, ScalarMetric,
ConfusionMatrixMetric]] = []
ScalarMetric, ConfusionMatrixMetric]] = []
extra: Dict[str, Any] = {}

def object_annotations(self) -> List[ObjectAnnotation]:
Expand Down
22 changes: 14 additions & 8 deletions labelbox/data/serialization/ndjson/label.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,24 +6,24 @@

from pydantic import BaseModel

from ...annotation_types.annotation import ClassificationAnnotation, ObjectAnnotation, VideoClassificationAnnotation, VideoObjectAnnotation
from ...annotation_types.annotation import ClassificationAnnotation, ObjectAnnotation, VideoClassificationAnnotation, VideoObjectAnnotation, DICOMObjectAnnotation
from ...annotation_types.collection import LabelCollection, LabelGenerator
from ...annotation_types.data import ImageData, TextData, VideoData
from ...annotation_types.data import DicomData, ImageData, TextData, VideoData
from ...annotation_types.label import Label
from ...annotation_types.ner import TextEntity, ConversationEntity
from ...annotation_types.classification import Dropdown
from ...annotation_types.metrics import ScalarMetric, ConfusionMatrixMetric

from .metric import NDScalarMetric, NDMetricAnnotation, NDConfusionMatrixMetric
from .classification import NDChecklistSubclass, NDClassification, NDClassificationType, NDRadioSubclass
from .objects import NDObject, NDObjectType, NDSegments
from .objects import NDObject, NDObjectType, NDSegments, NDDicomSegments
from .base import DataRow


class NDLabel(BaseModel):
annotations: List[Union[NDObjectType, NDClassificationType,
NDConfusionMatrixMetric, NDScalarMetric,
NDSegments]]
NDDicomSegments, NDSegments]]

def to_common(self) -> LabelGenerator:
grouped_annotations = defaultdict(list)
Expand Down Expand Up @@ -52,7 +52,11 @@ def _generate_annotations(
annots = []
data_row = annotations[0].data_row
for annotation in annotations:
if isinstance(annotation, NDSegments):
if isinstance(annotation, NDDicomSegments):
annots.extend(
NDDicomSegments.to_common(annotation, annotation.name,
annotation.schema_id))
elif isinstance(annotation, NDSegments):
annots.extend(
NDSegments.to_common(annotation, annotation.name,
annotation.schema_id))
Expand All @@ -73,9 +77,9 @@ def _infer_media_type(
self, data_row: DataRow,
annotations: List[Union[TextEntity, ConversationEntity,
VideoClassificationAnnotation,
VideoObjectAnnotation, ObjectAnnotation,
ClassificationAnnotation, ScalarMetric,
ConfusionMatrixMetric]]
DICOMObjectAnnotation, VideoObjectAnnotation,
ObjectAnnotation, ClassificationAnnotation,
ScalarMetric, ConfusionMatrixMetric]]
) -> Union[TextData, VideoData, ImageData]:
if len(annotations) == 0:
raise ValueError("Missing annotations while inferring media type")
Expand All @@ -86,6 +90,8 @@ def _infer_media_type(
data = TextData
elif VideoClassificationAnnotation in types or VideoObjectAnnotation in types:
data = VideoData
elif DICOMObjectAnnotation in types:
data = DicomData

if data_row.id:
return data(uid=data_row.id)
Expand Down
108 changes: 99 additions & 9 deletions labelbox/data/serialization/ndjson/objects.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
from ...annotation_types.ner import DocumentEntity, DocumentTextSelection, TextEntity
from ...annotation_types.types import Cuid
from ...annotation_types.geometry import Rectangle, Polygon, Line, Point, Mask
from ...annotation_types.annotation import ClassificationAnnotation, ObjectAnnotation, VideoObjectAnnotation
from ...annotation_types.annotation import ClassificationAnnotation, ObjectAnnotation, VideoObjectAnnotation, DICOMObjectAnnotation
from .classification import NDSubclassification, NDSubclassificationType
from .base import DataRow, NDAnnotation

Expand All @@ -30,6 +30,10 @@ class VideoSupported(BaseModel):
frame: int


class DicomSupported(BaseModel):
    """Mixin for NDJSON payloads that carry a DICOM group key."""
    # Serialized GroupKey value (e.g. "axial") — see GroupKey enum.
    group_key: str

class _Point(BaseModel):
x: float
y: float
Expand Down Expand Up @@ -136,6 +140,20 @@ def from_common(cls, frame: int, line: Line):
} for pt in line.points])


class NDDicomLine(NDFrameLine):
    """NDJSON keyframe polyline for DICOM labels."""

    def to_common(self, name: str, feature_schema_id: Cuid, segment_index: int,
                  group_key: str) -> DICOMObjectAnnotation:
        """Convert this NDJSON keyframe line into a DICOMObjectAnnotation."""
        points = [Point(x=pt.x, y=pt.y) for pt in self.line]
        return DICOMObjectAnnotation(name=name,
                                     feature_schema_id=feature_schema_id,
                                     frame=self.frame,
                                     segment_index=segment_index,
                                     keyframe=True,
                                     value=Line(points=points),
                                     group_key=group_key)


class NDPolygon(NDBaseObject, ConfidenceMixin):
polygon: List[_Point]

Expand Down Expand Up @@ -259,6 +277,31 @@ def from_common(cls, segment):
])


class NDDicomSegment(NDSegment):
    """One segment of a DICOM annotation: a sequence of keyframe lines."""
    keyframes: List[NDDicomLine]

    @staticmethod
    def lookup_segment_object_type(segment: List) -> "NDDicomObjectType":
        """Return the NDJSON object type that matches the segment's geometry.

        DICOM segments currently support only polyline (Line) geometries.
        """
        if type(segment[0].value) == Line:
            return NDDicomLine
        raise ValueError('DICOM segments only support Line objects')

    def to_common(self, name: str, feature_schema_id: Cuid, uuid: str,
                  segment_index: int, group_key: str):
        """Convert every keyframe to a common annotation, tagging each with *uuid*."""
        annotations = []
        for keyframe in self.keyframes:
            common = keyframe.to_common(name=name,
                                        feature_schema_id=feature_schema_id,
                                        segment_index=segment_index,
                                        group_key=group_key)
            annotations.append(self.segment_with_uuid(common, uuid))
        return annotations


class NDSegments(NDBaseObject):
segments: List[NDSegment]

Expand Down Expand Up @@ -287,6 +330,36 @@ def from_common(cls, segments: List[VideoObjectAnnotation], data: VideoData,
uuid=extra.get('uuid'))


class NDDicomSegments(NDBaseObject, DicomSupported):
    """NDJSON payload holding every segment of one DICOM feature."""
    segments: List[NDDicomSegment]

    def to_common(self, name: str, feature_schema_id: Cuid):
        """Flatten all segments into a single list of common annotations."""
        annotations = []
        for segment_index, segment in enumerate(self.segments):
            annotations.extend(
                segment.to_common(name=name,
                                  feature_schema_id=feature_schema_id,
                                  segment_index=segment_index,
                                  uuid=self.uuid,
                                  group_key=self.group_key))
        return annotations

    @classmethod
    def from_common(cls, segments: List[DICOMObjectAnnotation], data: VideoData,
                    name: str, feature_schema_id: Cuid, extra: Dict[str, Any],
                    group_key: str) -> "NDDicomSegments":
        """Build an NDDicomSegments payload from common DICOM annotations."""
        nd_segments = [
            NDDicomSegment.from_common(segment) for segment in segments
        ]
        return cls(segments=nd_segments,
                   dataRow=DataRow(id=data.uid),
                   name=name,
                   schema_id=feature_schema_id,
                   uuid=extra.get('uuid'),
                   group_key=group_key)


class _URIMask(BaseModel):
instanceURI: str
colorRGB: Tuple[int, int, int]
Expand Down Expand Up @@ -460,13 +533,21 @@ def from_common(
obj = cls.lookup_object(annotation)

# if it is video segments
if (obj == NDSegments):
return obj.from_common(
annotation,
data,
name=annotation[0][0].name,
feature_schema_id=annotation[0][0].feature_schema_id,
extra=annotation[0][0].extra)
if (obj == NDSegments or obj == NDDicomSegments):

first_video_annotation = annotation[0][0]
args = dict(
segments=annotation,
data=data,
name=first_video_annotation.name,
feature_schema_id=first_video_annotation.feature_schema_id,
extra=first_video_annotation.extra)

if isinstance(first_video_annotation, DICOMObjectAnnotation):
group_key = first_video_annotation.group_key.value
args.update(dict(group_key=group_key))

return obj.from_common(**args)

subclasses = [
NDSubclassification.from_common(annot)
Expand All @@ -483,7 +564,15 @@ def from_common(
def lookup_object(
annotation: Union[ObjectAnnotation, List]) -> "NDObjectType":
if isinstance(annotation, list):
result = NDSegments
try:
first_annotation = annotation[0][0]
except IndexError:
raise ValueError("Annotation list cannot be empty")

if isinstance(first_annotation, DICOMObjectAnnotation):
result = NDDicomSegments
else:
result = NDSegments
else:
result = {
Line: NDLine,
Expand All @@ -510,3 +599,4 @@ def lookup_object(
NDEntityType, NDDocumentEntity]

NDFrameObjectType = NDFrameRectangle, NDFramePoint, NDFrameLine
NDDicomObjectType = NDDicomLine
15 changes: 8 additions & 7 deletions tests/data/serialization/coco/test_coco.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,28 +6,29 @@
COCO_ASSETS_DIR = "tests/data/assets/coco"


def run_instances(tmpdir):
    """Round-trip COCO instance annotations through the converter.

    Deserializes the bundled ``instances.json`` fixture and serializes it
    back into *tmpdir*, ensuring neither direction raises.
    """
    # Use a context manager so the fixture file handle is closed promptly
    # (the original `json.load(open(...))` leaked the handle).
    with open(Path(COCO_ASSETS_DIR, 'instances.json')) as f:
        instance_json = json.load(f)
    res = COCOConverter.deserialize_instances(instance_json,
                                              Path(COCO_ASSETS_DIR, 'images'))
    COCOConverter.serialize_instances(res, Path(tmpdir))


def test_rle_objects(tmpdir):
    """Round-trip COCO RLE annotations through the converter."""
    # Context manager avoids leaking the fixture file handle.
    with open(Path(COCO_ASSETS_DIR, 'rle.json')) as f:
        rle_json = json.load(f)
    res = COCOConverter.deserialize_instances(rle_json,
                                              Path(COCO_ASSETS_DIR, 'images'))
    # Wrap tmpdir in Path for consistency with the other COCO tests.
    COCOConverter.serialize_instances(res, Path(tmpdir))


def test_panoptic(tmpdir):
    """Round-trip COCO panoptic annotations through the converter."""
    # Context manager avoids leaking the fixture file handle.
    with open(Path(COCO_ASSETS_DIR, 'panoptic.json')) as f:
        panoptic_json = json.load(f)
    image_dir, mask_dir = [
        Path(COCO_ASSETS_DIR, dir_name) for dir_name in ['images', 'masks']
    ]
    res = COCOConverter.deserialize_panoptic(panoptic_json, image_dir, mask_dir)
    # Compose output paths with Path(...) instead of f'/{tmpdir}/...',
    # which prepended a spurious leading slash to an absolute tmpdir.
    COCOConverter.serialize_panoptic(res,
                                     Path(tmpdir, 'images_panoptic'),
                                     Path(tmpdir, 'masks_panoptic'))
Loading