diff --git a/docling_core/experimental/serializer/base.py b/docling_core/experimental/serializer/base.py index 41c8cd45..867ae865 100644 --- a/docling_core/experimental/serializer/base.py +++ b/docling_core/experimental/serializer/base.py @@ -197,7 +197,7 @@ def serialize_hyperlink( @abstractmethod def get_parts( self, - node: Optional[NodeItem] = None, + item: Optional[NodeItem] = None, **kwargs, ) -> list[SerializationResult]: """Get the components to be combined for serializing this node.""" @@ -222,6 +222,6 @@ def serialize_captions( ... @abstractmethod - def get_excluded_refs(self) -> list[str]: + def get_excluded_refs(self, **kwargs) -> list[str]: """Get references to excluded items.""" ... diff --git a/docling_core/experimental/serializer/common.py b/docling_core/experimental/serializer/common.py index 17735c10..b94d5a6e 100644 --- a/docling_core/experimental/serializer/common.py +++ b/docling_core/experimental/serializer/common.py @@ -5,12 +5,13 @@ """Define base classes for serialization.""" import sys -from functools import cached_property +from abc import abstractmethod +from copy import deepcopy from pathlib import Path -from typing import Optional, Union +from typing import Any, Optional, Union -from pydantic import AnyUrl, BaseModel, computed_field -from typing_extensions import override +from pydantic import AnyUrl, BaseModel, NonNegativeInt +from typing_extensions import Self, override from docling_core.experimental.serializer.base import ( BaseDocSerializer, @@ -24,9 +25,7 @@ BaseTextSerializer, SerializationResult, ) -from docling_core.types.doc.base import ImageRefMode from docling_core.types.doc.document import ( - DEFAULT_CONTENT_LAYERS, DOCUMENT_TOKENS_EXPORT_LABELS, ContentLayer, DocItem, @@ -38,10 +37,7 @@ KeyValueItem, NodeItem, OrderedList, - PictureClassificationData, - PictureDescriptionData, PictureItem, - PictureMoleculeData, TableItem, TextItem, UnorderedList, @@ -49,6 +45,30 @@ from docling_core.types.doc.labels import DocItemLabel _DEFAULT_LABELS = DOCUMENT_TOKENS_EXPORT_LABELS +_DEFAULT_LAYERS = {cl for cl in ContentLayer} + + +class CommonParams(BaseModel): + """Common serialization parameters.""" + + # allowlists with non-recursive semantics, i.e. if a list group node is outside the + # range and some of its children items are within, they will be serialized + labels: set[DocItemLabel] = _DEFAULT_LABELS + layers: set[ContentLayer] = _DEFAULT_LAYERS + pages: Optional[set[int]] = None # None means all pages are allowed + + # slice-like semantics: start is included, stop is excluded + start_idx: NonNegativeInt = 0 + stop_idx: NonNegativeInt = sys.maxsize + + include_formatting: bool = True + include_hyperlinks: bool = True + caption_delim: str = " " + + def merge_with_patch(self, patch: dict[str, Any]) -> Self: + """Create an instance by merging the provided patch dict on top of self.""" + res = self.model_validate({**self.model_dump(), **patch}) + return res class DocSerializer(BaseModel, BaseDocSerializer): @@ -58,22 +78,10 @@ class Config: """Pydantic config.""" arbitrary_types_allowed = True + extra = "forbid" doc: DoclingDocument - include_formatting: bool = True - include_hyperlinks: bool = True - escape_underscores: bool = True - - # this filtering criteria are non-recursive; - # e.g. 
if a list group node is outside the range and some of its children items are - # within, they will be serialized - start: int = 0 - stop: int = sys.maxsize - labels: set[DocItemLabel] = _DEFAULT_LABELS - layers: set[ContentLayer] = DEFAULT_CONTENT_LAYERS - pages: Optional[set[int]] = None - text_serializer: BaseTextSerializer table_serializer: BaseTableSerializer picture_serializer: BasePictureSerializer @@ -84,34 +92,34 @@ class Config: list_serializer: BaseListSerializer inline_serializer: BaseInlineSerializer - # these will be passed to the picture serializer (None defers/delegates fallback - # setting to callee): - image_placeholder: Optional[str] = None - image_mode: Optional[ImageRefMode] = None + params: CommonParams = CommonParams() - @computed_field # type: ignore[misc] - @cached_property - def _excluded_refs(self) -> list[str]: + # TODO add cache based on start-stop params + @override + def get_excluded_refs(self, **kwargs) -> list[str]: + """References to excluded items.""" + params = self.params.merge_with_patch(patch=kwargs) refs: list[str] = [ item.self_ref for ix, (item, _) in enumerate( self.doc.iterate_items( with_groups=True, traverse_pictures=True, + included_content_layers=params.layers, ) ) if ( - (ix < self.start or ix >= self.stop) + (ix < params.start_idx or ix >= params.stop_idx) or ( isinstance(item, DocItem) and ( - item.label not in self.labels - or item.content_layer not in self.layers + item.label not in params.labels + or item.content_layer not in params.layers or ( - self.pages is not None + params.pages is not None and ( (not item.prov) - or item.prov[0].page_no not in self.pages + or item.prov[0].page_no not in params.pages ) ) ) @@ -120,114 +128,189 @@ def _excluded_refs(self) -> list[str]: ] return refs - @override - def get_excluded_refs(self) -> list[str]: - """References to excluded items.""" - return self._excluded_refs + @abstractmethod + def serialize_page(self, parts: list[SerializationResult]) -> SerializationResult: + """Serialize a page out of its parts.""" + ... + + @abstractmethod + def serialize_doc(self, pages: list[SerializationResult]) -> SerializationResult: + """Serialize a document out of its pages.""" + ... 
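    # Example (sketch): throughout DocSerializer, per-call kwargs are merged over the
    # configured defaults via CommonParams.merge_with_patch(), which validates a new
    # instance and leaves the original params untouched; the values below are illustrative:
    #
    #     params = CommonParams(start_idx=0, stop_idx=10)
    #     params.merge_with_patch({"stop_idx": 5}).stop_idx  # -> 5
    #     params.stop_idx                                     # -> 10 (unchanged)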
+ + def _serialize_body(self) -> SerializationResult: + """Serialize the document body.""" + # find page ranges if available; otherwise regard whole doc as a single page + last_page: Optional[int] = None + starts: list[int] = [] + for ix, (item, _) in enumerate( + self.doc.iterate_items( + with_groups=True, + traverse_pictures=True, + included_content_layers=self.params.layers, + ) + ): + if isinstance(item, DocItem): + if item.prov: + if last_page is None or item.prov[0].page_no > last_page: + starts.append(ix) + last_page = item.prov[0].page_no + page_ranges = [ + ( + (starts[i] if i > 0 else 0), + (starts[i + 1] if i < len(starts) - 1 else sys.maxsize), + ) + for i, _ in enumerate(starts) + ] or [ + (0, sys.maxsize) + ] # use whole range if no pages detected + + page_results: list[SerializationResult] = [] + for page_range in page_ranges: + params_to_pass = deepcopy(self.params) + params_to_pass.start_idx = page_range[0] + params_to_pass.stop_idx = page_range[1] + subparts = self.get_parts(**params_to_pass.model_dump()) + page_res = self.serialize_page(subparts) + page_results.append(page_res) + res = self.serialize_doc(page_results) + return res - # making some assumptions about the kwargs it can pass @override - def get_parts( + def serialize( self, - node: Optional[NodeItem] = None, *, - traverse_pictures: bool = False, + item: Optional[NodeItem] = None, list_level: int = 0, is_inline_scope: bool = False, visited: Optional[set[str]] = None, # refs of visited items **kwargs, - ) -> list[SerializationResult]: - """Get the components to be combined for serializing this node.""" + ) -> SerializationResult: + """Serialize a given node.""" my_visited: set[str] = visited if visited is not None else set() - parts: list[SerializationResult] = [] + empty_res = SerializationResult(text="") + if item is None or item == self.doc.body: + if self.doc.body.self_ref not in my_visited: + my_visited.add(self.doc.body.self_ref) + return self._serialize_body() + else: + return empty_res label_blocklist = { + # captions only considered in context of floating items (pictures, tables) DocItemLabel.CAPTION, - DocItemLabel.FOOTNOTE, - # TODO handle differently as it clashes with self.labels } - for ix, (item, _) in enumerate( - self.doc.iterate_items( - root=node, - with_groups=True, - traverse_pictures=traverse_pictures, - # ... 
- ) - ): - if item.self_ref in my_visited: - continue - else: - my_visited.add(item.self_ref) - ######## - # groups - ######## - if isinstance(item, (UnorderedList, OrderedList)): - part = self.list_serializer.serialize( + ######## + # groups + ######## + if isinstance(item, (UnorderedList, OrderedList)): + part = self.list_serializer.serialize( + item=item, + doc_serializer=self, + doc=self.doc, + list_level=list_level, + is_inline_scope=is_inline_scope, + visited=my_visited, + **kwargs, + ) + elif isinstance(item, InlineGroup): + part = self.inline_serializer.serialize( + item=item, + doc_serializer=self, + doc=self.doc, + list_level=list_level, + visited=my_visited, + **kwargs, + ) + ########### + # doc items + ########### + elif isinstance(item, DocItem) and item.label in label_blocklist: + return empty_res + elif isinstance(item, TextItem): + part = ( + self.text_serializer.serialize( item=item, doc_serializer=self, doc=self.doc, - list_level=list_level, is_inline_scope=is_inline_scope, - visited=my_visited, - ) - elif isinstance(item, InlineGroup): - part = self.inline_serializer.serialize( - item=item, - doc_serializer=self, - doc=self.doc, - list_level=list_level, - visited=my_visited, + **kwargs, ) - ########### - # doc items - ########### - elif isinstance(item, DocItem) and item.label in label_blocklist: + if item.self_ref not in self.get_excluded_refs(**kwargs) + else empty_res + ) + elif isinstance(item, TableItem): + part = self.table_serializer.serialize( + item=item, + doc_serializer=self, + doc=self.doc, + **kwargs, + ) + elif isinstance(item, PictureItem): + part = self.picture_serializer.serialize( + item=item, + doc_serializer=self, + doc=self.doc, + visited=my_visited, + **kwargs, + ) + elif isinstance(item, KeyValueItem): + part = self.key_value_serializer.serialize( + item=item, + doc_serializer=self, + doc=self.doc, + **kwargs, + ) + elif isinstance(item, FormItem): + part = self.form_serializer.serialize( + item=item, + doc_serializer=self, + doc=self.doc, + **kwargs, + ) + else: + part = self.fallback_serializer.serialize( + item=item, + doc_serializer=self, + doc=self.doc, + **kwargs, + ) + return part + + # making some assumptions about the kwargs it can pass + @override + def get_parts( + self, + item: Optional[NodeItem] = None, + *, + traverse_pictures: bool = False, + list_level: int = 0, + is_inline_scope: bool = False, + visited: Optional[set[str]] = None, # refs of visited items + **kwargs, + ) -> list[SerializationResult]: + """Get the components to be combined for serializing this node.""" + parts: list[SerializationResult] = [] + my_visited: set[str] = visited if visited is not None else set() + params = self.params.merge_with_patch(patch=kwargs) + for item, _ in self.doc.iterate_items( + root=item, + with_groups=True, + traverse_pictures=traverse_pictures, + included_content_layers=params.layers, + ): + if item.self_ref in my_visited: continue - elif isinstance(item, TextItem): - part = ( - self.text_serializer.serialize( - item=item, - doc_serializer=self, - doc=self.doc, - is_inline_scope=is_inline_scope, - ) - if item.self_ref not in self.get_excluded_refs() - else SerializationResult(text="") - ) - elif isinstance(item, TableItem): - part = self.table_serializer.serialize( - item=item, - doc_serializer=self, - doc=self.doc, - ) - elif isinstance(item, PictureItem): - part = self.picture_serializer.serialize( - item=item, - doc_serializer=self, - doc=self.doc, - visited=my_visited, - image_mode=self.image_mode, - 
image_placeholder=self.image_placeholder, - ) - elif isinstance(item, KeyValueItem): - part = self.key_value_serializer.serialize( - item=item, - doc_serializer=self, - doc=self.doc, - ) - elif isinstance(item, FormItem): - part = self.form_serializer.serialize( - item=item, - doc_serializer=self, - doc=self.doc, - ) else: - part = self.fallback_serializer.serialize( - item=item, - doc_serializer=self, - doc=self.doc, - ) + my_visited.add(item.self_ref) + part = self.serialize( + item=item, + list_level=list_level, + is_inline_scope=is_inline_scope, + visited=my_visited, + **kwargs, + ) if part.text: parts.append(part) return parts @@ -242,8 +325,9 @@ def post_process( **kwargs, ) -> str: """Apply some text post-processing steps.""" + params = self.params.merge_with_patch(patch=kwargs) res = text - if self.include_formatting and formatting: + if params.include_formatting and formatting: if formatting.bold: res = self.serialize_bold(text=res) if formatting.italic: @@ -252,7 +336,7 @@ def post_process( res = self.serialize_underline(text=res) if formatting.strikethrough: res = self.serialize_strikethrough(text=res) - if self.include_hyperlinks and hyperlink: + if params.include_hyperlinks and hyperlink: res = self.serialize_hyperlink(text=res, hyperlink=hyperlink) return res @@ -287,67 +371,19 @@ def serialize_hyperlink( def serialize_captions( self, item: FloatingItem, - separator: Optional[str] = None, **kwargs, ) -> SerializationResult: """Serialize the item's captions.""" - text_parts: list[str] = [ - it.text - for cap in item.captions - if isinstance(it := cap.resolve(self.doc), TextItem) - and it.self_ref not in self.get_excluded_refs() - ] - text_res = (separator or "\n").join(text_parts) - text_res = self.post_process(text=text_res) - return SerializationResult(text=text_res) - - -class PictureSerializer(BasePictureSerializer): - """Class for picture serializers.""" - - # helper function - def _serialize_content( - self, - item: PictureItem, - doc_serializer: "BaseDocSerializer", - doc: DoclingDocument, - separator: Optional[str] = None, - visited: Optional[set[str]] = None, - **kwargs, - ) -> SerializationResult: - parts = doc_serializer.get_parts( - node=item, - traverse_pictures=True, - visited=visited, - ) - text_res = (separator or " ").join([p.text for p in parts]) - # NOTE: we do no postprocessing since already done as needed - return SerializationResult(text=text_res) - - # helper function - def _serialize_annotations( - self, - item: PictureItem, - doc_serializer: "BaseDocSerializer", - doc: DoclingDocument, - separator: Optional[str] = None, - **kwargs, - ) -> SerializationResult: - text_parts: list[str] = [] - for annotation in item.annotations: - if isinstance(annotation, PictureClassificationData): - predicted_class = ( - annotation.predicted_classes[0].class_name - if annotation.predicted_classes - else None - ) - if predicted_class is not None: - text_parts.append(f"Picture type: {predicted_class}") - elif isinstance(annotation, PictureMoleculeData): - text_parts.append(f"SMILES: {annotation.smi}") - elif isinstance(annotation, PictureDescriptionData): - text_parts.append(f"Description: {annotation.text}") - - text_res = (separator or "\n").join(text_parts) - text_res = doc_serializer.post_process(text=text_res) + params = self.params.merge_with_patch(patch=kwargs) + if DocItemLabel.CAPTION in params.labels: + text_parts: list[str] = [ + it.text + for cap in item.captions + if isinstance(it := cap.resolve(self.doc), TextItem) + and it.self_ref not in 
self.get_excluded_refs(**kwargs) + ] + text_res = params.caption_delim.join(text_parts) + text_res = self.post_process(text=text_res) + else: + text_res = "" return SerializationResult(text=text_res) diff --git a/docling_core/experimental/serializer/doctags.py b/docling_core/experimental/serializer/doctags.py new file mode 100644 index 00000000..84c9ae38 --- /dev/null +++ b/docling_core/experimental/serializer/doctags.py @@ -0,0 +1,455 @@ +"""Define classes for Doctags serialization.""" + +import html +from enum import Enum +from pathlib import Path +from typing import Optional, Union + +from pydantic import AnyUrl, BaseModel +from typing_extensions import override + +from docling_core.experimental.serializer.base import ( + BaseDocSerializer, + BaseFallbackSerializer, + BaseFormSerializer, + BaseInlineSerializer, + BaseKeyValueSerializer, + BaseListSerializer, + BasePictureSerializer, + BaseTableSerializer, + BaseTextSerializer, + SerializationResult, +) +from docling_core.experimental.serializer.common import CommonParams, DocSerializer +from docling_core.types.doc.document import ( + CodeItem, + DoclingDocument, + Formatting, + FormItem, + InlineGroup, + KeyValueItem, + ListItem, + NodeItem, + OrderedList, + PictureClassificationData, + PictureItem, + PictureMoleculeData, + TableItem, + TextItem, + UnorderedList, +) +from docling_core.types.doc.tokens import DocumentToken + + +def _wrap(text: str, wrap_tag: str) -> str: + return f"<{wrap_tag}>{text}" + + +class DocTagsParams(CommonParams): + """DocTags-specific serialization parameters.""" + + class Mode(str, Enum): + """DocTags serialization mode.""" + + MINIFIED = "minified" + HUMAN_FRIENDLY = "human_friendly" + + new_line: str = "" + xsize: int = 500 + ysize: int = 500 + add_location: bool = True + add_caption: bool = True + add_content: bool = True + add_table_cell_location: bool = False + add_table_cell_text: bool = True + add_page_break: bool = True + + mode: Mode = Mode.HUMAN_FRIENDLY + + +def _get_delim(mode: DocTagsParams.Mode) -> str: + if mode == DocTagsParams.Mode.HUMAN_FRIENDLY: + delim = "\n" + elif mode == DocTagsParams.Mode.MINIFIED: + delim = "" + else: + raise RuntimeError(f"Unknown DocTags mode: {mode}") + return delim + + +class DocTagsTextSerializer(BaseModel, BaseTextSerializer): + """DocTags-specific text item serializer.""" + + @override + def serialize( + self, + *, + item: TextItem, + doc_serializer: BaseDocSerializer, + doc: DoclingDocument, + **kwargs, + ) -> SerializationResult: + """Serializes the passed item.""" + from docling_core.types.doc.document import SectionHeaderItem + + params = DocTagsParams(**kwargs) + wrap_tag: Optional[str] = DocumentToken.create_token_name_from_doc_item_label( + label=item.label, + **({"level": item.level} if isinstance(item, SectionHeaderItem) else {}), + ) + parts: list[str] = [] + + if params.add_location: + location = item.get_location_tokens( + doc=doc, + new_line=params.new_line, + xsize=params.xsize, + ysize=params.ysize, + ) + if location: + parts.append(location) + + if params.add_content: + text_part = item.text + text_part = doc_serializer.post_process( + text=text_part, + escape_html=False, # TODO review + formatting=item.formatting, + hyperlink=item.hyperlink, + ) + + if isinstance(item, CodeItem): + language_token = DocumentToken.get_code_language_token( + code_language=item.code_language, + ) + text_part = f"{language_token}{text_part}" + else: + text_part = text_part.strip() + if isinstance(item, ListItem): + wrap_tag = None # deferring list item tags to list 
handling + + if text_part: + parts.append(text_part) + + res = "".join(parts) + if wrap_tag is not None: + res = _wrap(text=res, wrap_tag=wrap_tag) + return SerializationResult(text=res) + + +class DocTagsTableSerializer(BaseTableSerializer): + """DocTags-specific table item serializer.""" + + @override + def serialize( + self, + *, + item: TableItem, + doc_serializer: BaseDocSerializer, + doc: DoclingDocument, + **kwargs, + ) -> SerializationResult: + """Serializes the passed item.""" + params = DocTagsParams(**kwargs) + + body = "" + + if item.self_ref not in doc_serializer.get_excluded_refs(**kwargs): + if params.add_location: + body += item.get_location_tokens( + doc=doc, + new_line=params.new_line, + xsize=params.xsize, + ysize=params.ysize, + ) + + body += item.export_to_otsl( + doc=doc, + add_cell_location=params.add_table_cell_location, + add_cell_text=params.add_table_cell_text, + xsize=params.xsize, + ysize=params.ysize, + ) + + if params.add_caption and len(item.captions): + text = doc_serializer.serialize_captions(item, **kwargs).text + + if len(text): + body += f"<{DocumentToken.CAPTION.value}>" + for caption in item.captions: + if caption.cref not in doc_serializer.get_excluded_refs(**kwargs): + body += caption.resolve(doc).get_location_tokens( + doc=doc, + new_line=params.new_line, + xsize=params.xsize, + ysize=params.ysize, + ) + body += f"{text.strip()}" + body += f"" + body += f"{params.new_line}" + + if body: + body = _wrap(text=body, wrap_tag=DocumentToken.OTSL.value) + + return SerializationResult(text=body) + + +class DocTagsPictureSerializer(BasePictureSerializer): + """DocTags-specific picture item serializer.""" + + @override + def serialize( + self, + *, + item: PictureItem, + doc_serializer: BaseDocSerializer, + doc: DoclingDocument, + **kwargs, + ) -> SerializationResult: + """Serializes the passed item.""" + params = DocTagsParams(**kwargs) + + parts: list[str] = [] + + if item.self_ref not in doc_serializer.get_excluded_refs(**kwargs): + body = "" + if params.add_location: + body += item.get_location_tokens( + doc=doc, + new_line=params.new_line, + xsize=params.xsize, + ysize=params.ysize, + ) + + classifications = [ + ann + for ann in item.annotations + if isinstance(ann, PictureClassificationData) + ] + if len(classifications) > 0: + predicted_class = classifications[0].predicted_classes[0].class_name + body += DocumentToken.get_picture_classification_token(predicted_class) + + smiles_annotations = [ + ann for ann in item.annotations if isinstance(ann, PictureMoleculeData) + ] + if len(smiles_annotations) > 0: + body += _wrap( + text=smiles_annotations[0].smi, wrap_tag=DocumentToken.SMILES.value + ) + parts.append(body) + + if params.add_caption and len(item.captions): + text = doc_serializer.serialize_captions(item, **kwargs).text + + if len(text): + body = "" + for caption in item.captions: + if caption.cref not in doc_serializer.get_excluded_refs(**kwargs): + body += caption.resolve(doc).get_location_tokens( + doc=doc, + new_line=params.new_line, + xsize=params.xsize, + ysize=params.ysize, + ) + body += f"{text.strip()}" + if body: + body = _wrap(text=body, wrap_tag=DocumentToken.CAPTION.value) + parts.append(body) + + text = "".join(parts) + if text: + token = DocumentToken.create_token_name_from_doc_item_label( + label=item.label + ) + text = _wrap(text=text, wrap_tag=token) + return SerializationResult(text=text) + + +class DocTagsKeyValueSerializer(BaseKeyValueSerializer): + """DocTags-specific key-value item serializer.""" + + @override + def 
serialize( + self, + *, + item: KeyValueItem, + doc_serializer: "BaseDocSerializer", + doc: DoclingDocument, + **kwargs, + ) -> SerializationResult: + """Serializes the passed item.""" + # TODO add actual implementation + text_res = "" + return SerializationResult(text=text_res) + + +class DocTagsFormSerializer(BaseFormSerializer): + """DocTags-specific form item serializer.""" + + @override + def serialize( + self, + *, + item: FormItem, + doc_serializer: "BaseDocSerializer", + doc: DoclingDocument, + **kwargs, + ) -> SerializationResult: + """Serializes the passed item.""" + # TODO add actual implementation + text_res = "" + return SerializationResult(text=text_res) + + +class DocTagsListSerializer(BaseModel, BaseListSerializer): + """DocTags-specific list serializer.""" + + indent: int = 4 + + @override + def serialize( + self, + *, + item: Union[UnorderedList, OrderedList], + doc_serializer: "BaseDocSerializer", + doc: DoclingDocument, + list_level: int = 0, + is_inline_scope: bool = False, + visited: Optional[set[str]] = None, # refs of visited items + **kwargs, + ) -> SerializationResult: + """Serializes the passed item.""" + my_visited = visited or set() + params = DocTagsParams(**kwargs) + parts = doc_serializer.get_parts( + item=item, + list_level=list_level + 1, + is_inline_scope=is_inline_scope, + visited=my_visited, + **kwargs, + ) + delim = _get_delim(mode=params.mode) + if parts: + text_res = delim.join( + [ + _wrap(text=p.text, wrap_tag=DocumentToken.LIST_ITEM.value) + for p in parts + ] + ) + text_res = f"{text_res}{delim}" + wrap_tag = ( + DocumentToken.ORDERED_LIST.value + if isinstance(item, OrderedList) + else DocumentToken.UNORDERED_LIST.value + ) + text_res = _wrap(text=text_res, wrap_tag=wrap_tag) + else: + text_res = "" + return SerializationResult(text=text_res) + + +class DocTagsInlineSerializer(BaseInlineSerializer): + """DocTags-specific inline group serializer.""" + + @override + def serialize( + self, + *, + item: InlineGroup, + doc_serializer: "BaseDocSerializer", + doc: DoclingDocument, + list_level: int = 0, + visited: Optional[set[str]] = None, # refs of visited items + **kwargs, + ) -> SerializationResult: + """Serializes the passed item.""" + my_visited = visited or set() + params = DocTagsParams(**kwargs) + parts = doc_serializer.get_parts( + item=item, + list_level=list_level, + is_inline_scope=True, + visited=my_visited, + **kwargs, + ) + wrap_tag = DocumentToken.INLINE.value + delim = _get_delim(mode=params.mode) + text_res = delim.join([p.text for p in parts if p.text]) + if text_res: + text_res = f"{text_res}{delim}" + text_res = _wrap(text=text_res, wrap_tag=wrap_tag) + return SerializationResult(text=text_res) + + +class DocTagsFallbackSerializer(BaseFallbackSerializer): + """DocTags-specific fallback serializer.""" + + @override + def serialize( + self, + *, + item: NodeItem, + doc_serializer: "BaseDocSerializer", + doc: DoclingDocument, + **kwargs, + ) -> SerializationResult: + """Serializes the passed item.""" + text_res = "" + return SerializationResult(text=text_res) + + +class DocTagsDocSerializer(DocSerializer): + """DocTags-specific document serializer.""" + + text_serializer: BaseTextSerializer = DocTagsTextSerializer() + table_serializer: BaseTableSerializer = DocTagsTableSerializer() + picture_serializer: BasePictureSerializer = DocTagsPictureSerializer() + key_value_serializer: BaseKeyValueSerializer = DocTagsKeyValueSerializer() + form_serializer: BaseFormSerializer = DocTagsFormSerializer() + fallback_serializer: 
BaseFallbackSerializer = DocTagsFallbackSerializer() + + list_serializer: BaseListSerializer = DocTagsListSerializer() + inline_serializer: BaseInlineSerializer = DocTagsInlineSerializer() + + params: DocTagsParams = DocTagsParams() + + def post_process( + self, + text: str, + *, + escape_html: bool = True, + formatting: Optional[Formatting] = None, + hyperlink: Optional[Union[AnyUrl, Path]] = None, + **kwargs, + ) -> str: + """Apply some text post-processing steps.""" + res = text + if escape_html: + res = html.escape(res, quote=False) + res = super().post_process( + text=res, + formatting=formatting, + hyperlink=hyperlink, + ) + return res + + @override + def serialize_page(self, parts: list[SerializationResult]) -> SerializationResult: + """Serialize a page out of its parts.""" + delim = _get_delim(mode=self.params.mode) + text_res = delim.join([p.text for p in parts]) + return SerializationResult(text=text_res) + + @override + def serialize_doc(self, pages: list[SerializationResult]) -> SerializationResult: + """Serialize a document out of its pages.""" + delim = _get_delim(mode=self.params.mode) + if self.params.add_page_break: + page_sep = f"{delim}<{DocumentToken.PAGE_BREAK.value}>{delim}" + content = page_sep.join([p.text for p in pages if p.text]) + else: + content = self.serialize_page(parts=pages).text + wrap_tag = DocumentToken.DOCUMENT.value + text_res = f"<{wrap_tag}>{content}{delim}" + return SerializationResult(text=text_res) diff --git a/docling_core/experimental/serializer/markdown.py b/docling_core/experimental/serializer/markdown.py index 3e6f7849..99d7d394 100644 --- a/docling_core/experimental/serializer/markdown.py +++ b/docling_core/experimental/serializer/markdown.py @@ -26,10 +26,11 @@ BaseTextSerializer, SerializationResult, ) -from docling_core.experimental.serializer.common import DocSerializer +from docling_core.experimental.serializer.common import CommonParams, DocSerializer from docling_core.types.doc.base import ImageRefMode from docling_core.types.doc.document import ( CodeItem, + ContentLayer, DocItem, DoclingDocument, Formatting, @@ -49,10 +50,20 @@ ) -class MarkdownTextSerializer(BaseModel, BaseTextSerializer): - """Markdown-specific text item serializer.""" +class MarkdownParams(CommonParams): + """Markdown-specific serialization parameters.""" + layers: set[ContentLayer] = {ContentLayer.BODY} + image_mode: ImageRefMode = ImageRefMode.PLACEHOLDER + image_placeholder: str = "" + indent: int = 4 wrap_width: Optional[PositiveInt] = None + page_break_placeholder: Optional[str] = None # e.g. 
"" + escape_underscores: bool = True + + +class MarkdownTextSerializer(BaseModel, BaseTextSerializer): + """Markdown-specific text item serializer.""" @override def serialize( @@ -65,6 +76,7 @@ def serialize( **kwargs, ) -> SerializationResult: """Serializes the passed item.""" + params = MarkdownParams(**kwargs) escape_html = True escape_underscores = True if isinstance(item, TitleItem): @@ -84,8 +96,8 @@ def serialize( res = "" escape_html = False escape_underscores = False - elif self.wrap_width: - res = textwrap.fill(item.text, width=self.wrap_width) + elif params.wrap_width: + res = textwrap.fill(item.text, width=params.wrap_width) else: res = item.text res = doc_serializer.post_process( @@ -113,12 +125,14 @@ def serialize( """Serializes the passed item.""" text_parts: list[str] = [] - if caption_txt := doc_serializer.serialize_captions( + cap_res = doc_serializer.serialize_captions( item=item, - ).text: - text_parts.append(caption_txt) + **kwargs, + ) + if cap_res.text: + text_parts.append(cap_res.text) - if item.self_ref not in doc_serializer.get_excluded_refs(): + if item.self_ref not in doc_serializer.get_excluded_refs(**kwargs): rows = [ [ # make sure that md tables are not broken @@ -158,33 +172,26 @@ def serialize( item: PictureItem, doc_serializer: BaseDocSerializer, doc: DoclingDocument, - image_mode: Optional[ImageRefMode] = None, - image_placeholder: Optional[str] = None, **kwargs, ) -> SerializationResult: """Serializes the passed item.""" - my_image_mode = ( - image_mode if image_mode is not None else ImageRefMode.PLACEHOLDER - ) - my_image_placeholder = ( - image_placeholder if image_placeholder is not None else "" - ) + params = MarkdownParams(**kwargs) texts: list[str] = [] cap_res = doc_serializer.serialize_captions( item=item, - separator="\n", + **kwargs, ) if cap_res.text: texts.append(cap_res.text) - if item.self_ref not in doc_serializer.get_excluded_refs(): + if item.self_ref not in doc_serializer.get_excluded_refs(**kwargs): img_res = self._serialize_image_part( item=item, doc=doc, - image_mode=my_image_mode, - image_placeholder=my_image_placeholder, + image_mode=params.image_mode, + image_placeholder=params.image_placeholder, ) if img_res.text: texts.append(img_res.text) @@ -288,8 +295,6 @@ def serialize( class MarkdownListSerializer(BaseModel, BaseListSerializer): """Markdown-specific list serializer.""" - indent: int = 4 - @override def serialize( self, @@ -303,12 +308,14 @@ def serialize( **kwargs, ) -> SerializationResult: """Serializes the passed item.""" + params = MarkdownParams(**kwargs) my_visited = visited or set() parts = doc_serializer.get_parts( - node=item, + item=item, list_level=list_level + 1, is_inline_scope=is_inline_scope, visited=my_visited, + **kwargs, ) sep = "\n" my_parts: list[SerializationResult] = [] @@ -318,7 +325,7 @@ def serialize( else: my_parts.append(p) - indent_str = list_level * self.indent * " " + indent_str = list_level * params.indent * " " is_ol = isinstance(item, OrderedList) text_res = sep.join( [ @@ -351,7 +358,7 @@ def serialize( """Serializes the passed item.""" my_visited = visited or set() parts = doc_serializer.get_parts( - node=item, + item=item, list_level=list_level, is_inline_scope=True, visited=my_visited, @@ -393,6 +400,8 @@ class MarkdownDocSerializer(DocSerializer): list_serializer: BaseListSerializer = MarkdownListSerializer() inline_serializer: BaseInlineSerializer = MarkdownInlineSerializer() + params: MarkdownParams = MarkdownParams() + @override def serialize_bold(self, text: str, **kwargs): """Apply 
Markdown-specific bold serialization.""" @@ -450,7 +459,8 @@ def post_process( ) -> str: """Apply some text post-processing steps.""" res = text - if escape_underscores and self.escape_underscores: + params = self.params.merge_with_patch(patch=kwargs) + if escape_underscores and params.escape_underscores: res = self._escape_underscores(text) if escape_html: res = html.escape(res, quote=False) @@ -462,8 +472,17 @@ def post_process( return res @override - def serialize(self, **kwargs) -> SerializationResult: - """Run the serialization.""" - parts = self.get_parts() - text_res = "\n\n".join([p.text for p in parts if p.text]) + def serialize_page(self, parts: list[SerializationResult]) -> SerializationResult: + """Serialize a page out of its parts.""" + text_res = "\n\n".join([p.text for p in parts]) return SerializationResult(text=text_res) + + @override + def serialize_doc(self, pages: list[SerializationResult]) -> SerializationResult: + """Serialize a document out of its pages.""" + if self.params.page_break_placeholder is not None: + sep = f"\n\n{self.params.page_break_placeholder}\n\n" + text_res = sep.join([p.text for p in pages if p.text]) + return SerializationResult(text=text_res) + else: + return self.serialize_page(parts=pages) diff --git a/docling_core/types/doc/document.py b/docling_core/types/doc/document.py index f6943fc7..89dc4f03 100644 --- a/docling_core/types/doc/document.py +++ b/docling_core/types/doc/document.py @@ -50,7 +50,7 @@ GraphLinkLabel, GroupLabel, ) -from docling_core.types.doc.tokens import DocumentToken, TableToken +from docling_core.types.doc.tokens import _LOC_PREFIX, DocumentToken, TableToken from docling_core.types.doc.utils import ( get_html_tag_with_text_direction, get_text_direction, @@ -741,22 +741,23 @@ def export_to_document_tokens( :param add_content: bool: (Default value = True) """ - body = f"<{self.label.value}>{new_line}" + from docling_core.experimental.serializer.doctags import ( + DocTagsDocSerializer, + DocTagsParams, + ) - if add_location: - body += self.get_location_tokens( - doc=doc, + serializer = DocTagsDocSerializer( + doc=doc, + params=DocTagsParams( new_line=new_line, xsize=xsize, ysize=ysize, - ) - - if add_content and self.text is not None: - body += f"{self.text.strip()}{new_line}" - - body += f"\n" - - return body + add_location=add_location, + add_content=add_content, + ), + ) + text = serializer.serialize(item=self).text + return text class TitleItem(TextItem): @@ -794,27 +795,23 @@ def export_to_document_tokens( :param add_content: bool: (Default value = True) """ - body = f"<{self.label.value}_level_{self.level}>{new_line}" - - # TODO: This must be done through an explicit mapping. 
- # assert DocumentToken.is_known_token( - # body - # ), f"failed DocumentToken.is_known_token({body})" + from docling_core.experimental.serializer.doctags import ( + DocTagsDocSerializer, + DocTagsParams, + ) - if add_location: - body += self.get_location_tokens( - doc=doc, + serializer = DocTagsDocSerializer( + doc=doc, + params=DocTagsParams( new_line=new_line, xsize=xsize, ysize=ysize, - ) - - if add_content and self.text is not None: - body += f"{self.text.strip()}{new_line}" - - body += f"\n" - - return body + add_location=add_location, + add_content=add_content, + ), + ) + text = serializer.serialize(item=self).text + return text class ListItem(TextItem): @@ -884,22 +881,23 @@ def export_to_document_tokens( :param add_content: bool: (Default value = True) """ - body = f"<{self.label.value}>{new_line}" + from docling_core.experimental.serializer.doctags import ( + DocTagsDocSerializer, + DocTagsParams, + ) - if add_location: - body += self.get_location_tokens( - doc=doc, + serializer = DocTagsDocSerializer( + doc=doc, + params=DocTagsParams( new_line=new_line, xsize=xsize, ysize=ysize, - ) - - if add_content and self.text is not None: - body += f"<_{self.code_language.value}_>{self.text}{new_line}" - - body += f"\n" - - return body + add_location=add_location, + add_content=add_content, + ), + ) + text = serializer.serialize(item=self).text + return text class FormulaItem(TextItem): @@ -953,7 +951,10 @@ def export_to_markdown( image_placeholder: str = "", ) -> str: """Export picture to Markdown format.""" - from docling_core.experimental.serializer.markdown import MarkdownDocSerializer + from docling_core.experimental.serializer.markdown import ( + MarkdownDocSerializer, + MarkdownParams, + ) if not add_caption: _logger.warning( @@ -962,19 +963,12 @@ def export_to_markdown( serializer = MarkdownDocSerializer( doc=self, - image_mode=image_mode, - ) - text = ( - serializer.picture_serializer.serialize( - item=self, - doc_serializer=serializer, - doc=doc, + params=MarkdownParams( image_mode=image_mode, image_placeholder=image_placeholder, - ).text - if serializer.picture_serializer - else "" + ), ) + text = serializer.serialize(item=self).text return text def export_to_html( @@ -1055,59 +1049,24 @@ def export_to_document_tokens( :param # not used at the moment """ - body = f"<{self.label.value}>{new_line}" - if add_location: - body += self.get_location_tokens( - doc=doc, + from docling_core.experimental.serializer.doctags import ( + DocTagsDocSerializer, + DocTagsParams, + ) + + serializer = DocTagsDocSerializer( + doc=doc, + params=DocTagsParams( new_line=new_line, xsize=xsize, ysize=ysize, - ) - - classifications = [ - ann - for ann in self.annotations - if isinstance(ann, PictureClassificationData) - ] - if len(classifications) > 0: - # ! TODO: currently this code assumes class_name is of type 'str' - # ! 
TODO: when it will change to an ENUM --> adapt code - predicted_class = classifications[0].predicted_classes[0].class_name - body += DocumentToken.get_picture_classification_token(predicted_class) - - smiles_annotations = [ - ann for ann in self.annotations if isinstance(ann, PictureMoleculeData) - ] - if len(smiles_annotations) > 0: - body += ( - "<" - + DocumentToken.SMILES.value - + ">" - + smiles_annotations[0].smi - + "" - ) - - if add_caption and len(self.captions): - text = self.caption_text(doc) - - if len(text): - body += f"<{DocItemLabel.CAPTION.value}>" - for caption in self.captions: - body += caption.resolve(doc).get_location_tokens( - doc=doc, - new_line=new_line, - xsize=xsize, - ysize=ysize, - ) - body += f"{text.strip()}" - body += f"" - body += f"{new_line}" - - body += f"\n" - - return body + add_location=add_location, + add_content=add_content, + add_caption=add_caption, + ), + ) + text = serializer.serialize(item=self).text + return text class TableItem(FloatingItem): @@ -1171,18 +1130,8 @@ def export_to_markdown(self, doc: Optional["DoclingDocument"] = None) -> str: MarkdownDocSerializer, ) - serializer = MarkdownDocSerializer( - doc=doc, - ) - text = ( - serializer.table_serializer.serialize( - item=self, - doc_serializer=serializer, - doc=doc, - ).text - if serializer.table_serializer - else "" - ) + serializer = MarkdownDocSerializer(doc=doc) + text = serializer.serialize(item=self).text return text else: _logger.warning( @@ -1414,39 +1363,25 @@ def export_to_document_tokens( :param add_caption: bool: (Default value = True) """ - otsl_tag = DocumentToken.OTSL.value - - body = f"<{otsl_tag}>{new_line}" + from docling_core.experimental.serializer.doctags import ( + DocTagsDocSerializer, + DocTagsParams, + ) - if add_location: - body += self.get_location_tokens( - doc=doc, + serializer = DocTagsDocSerializer( + doc=doc, + params=DocTagsParams( new_line=new_line, xsize=xsize, ysize=ysize, - ) - - body += self.export_to_otsl(doc, add_cell_location, add_cell_text, xsize, ysize) - - if add_caption and len(self.captions): - text = self.caption_text(doc) - - if len(text): - body += f"<{DocItemLabel.CAPTION.value}>" - for caption in self.captions: - body += caption.resolve(doc).get_location_tokens( - doc=doc, - new_line=new_line, - xsize=xsize, - ysize=ysize, - ) - body += f"{text.strip()}" - body += f"" - body += f"{new_line}" - - body += f"\n" - - return body + add_location=add_location, + add_caption=add_caption, + add_table_cell_location=add_cell_location, + add_table_cell_text=add_cell_text, + ), + ) + text = serializer.serialize(item=self).text + return text class GraphCell(BaseModel): @@ -2587,7 +2522,7 @@ def save_as_markdown( to_element=to_element, labels=labels, strict_text=strict_text, - escaping_underscores=escaping_underscores, + escape_underscores=escaping_underscores, image_placeholder=image_placeholder, image_mode=image_mode, indent=indent, @@ -2606,7 +2541,7 @@ def export_to_markdown( # noqa: C901 to_element: int = sys.maxsize, labels: set[DocItemLabel] = DOCUMENT_TOKENS_EXPORT_LABELS, strict_text: bool = False, - escaping_underscores: bool = True, + escape_underscores: bool = True, image_placeholder: str = "", image_mode: ImageRefMode = ImageRefMode.PLACEHOLDER, indent: int = 4, @@ -2648,25 +2583,22 @@ def export_to_markdown( # noqa: C901 """ from docling_core.experimental.serializer.markdown import ( MarkdownDocSerializer, - MarkdownListSerializer, - MarkdownTextSerializer, + MarkdownParams, ) serializer = MarkdownDocSerializer( doc=self, - 
start=from_element, - stop=to_element, - image_placeholder=image_placeholder, - image_mode=image_mode, - labels=labels, - layers=included_content_layers, - pages={page_no} if page_no is not None else None, - escaping_underscores=escaping_underscores, - text_serializer=MarkdownTextSerializer( - wrap_width=text_width if text_width > 0 else None, - ), - list_serializer=MarkdownListSerializer( + params=MarkdownParams( + labels=labels, + layers=included_content_layers, + pages={page_no} if page_no is not None else None, + start_idx=from_element, + stop_idx=to_element, + escape_underscores=escape_underscores, + image_placeholder=image_placeholder, + image_mode=image_mode, indent=indent, + wrap_width=text_width if text_width > 0 else None, ), ) ser_res = serializer.serialize() @@ -2696,7 +2628,7 @@ def export_to_text( # noqa: C901 to_element, labels, strict_text=True, - escaping_underscores=False, + escape_underscores=False, image_placeholder="", ) @@ -3189,7 +3121,7 @@ def otsl_extract_tokens_and_text(s: str): token for token in tokens if not ( - token.startswith(rf"<{DocumentToken.LOC.value}") + token.startswith(rf"<{_LOC_PREFIX}") or token in [ rf"<{DocumentToken.OTSL.value}>", @@ -3203,7 +3135,7 @@ def otsl_extract_tokens_and_text(s: str): token for token in text_parts if not ( - token.startswith(rf"<{DocumentToken.LOC.value}") + token.startswith(rf"<{_LOC_PREFIX}") or token in [ rf"<{DocumentToken.OTSL.value}>", @@ -3481,196 +3413,31 @@ def export_to_document_tokens( # noqa: C901 :returns: The content of the document formatted as a DocTags string. :rtype: str """ - - def _close_lists( - current_level: int, - previous_level: int, - ordered_list_stack: List[bool], - output_parts: List[str], - ) -> List[bool]: - """Close open list tags until the nesting level matches item's level.""" - while current_level < previous_level and ordered_list_stack: - last_is_ordered = ordered_list_stack.pop() - if last_is_ordered: - output_parts.append(f"\n") - else: - output_parts.append(f"\n") - previous_level -= 1 - return ordered_list_stack - - def _add_page_break_if_needed( - output_parts: List[str], - item, - prev_page_no, - page_break_enabled: bool, - ): - """Inserts a page-break token. - - Inserts a page-break token if the item's page number is different - from the previous item and page breaks are enabled. - Returns the updated output_parts list and the current page number. 
- """ - if not page_break_enabled: - return output_parts, prev_page_no - - if not item.prov: - return output_parts, prev_page_no - - current_page_no = item.prov[0].page_no - if prev_page_no is None: - return output_parts, current_page_no - - if current_page_no != prev_page_no: - output_parts.append(f"<{DocumentToken.PAGE_BREAK.value}>\n") - - return output_parts, current_page_no - - def _get_standalone_captions(document_body): - """Identify captions that are not attached to any table or figure.""" - all_captions = set() - matched_captions = set() - for item, _ in self.iterate_items(document_body, with_groups=True): - if item.label == DocItemLabel.CAPTION: - all_captions.update([item.self_ref]) - if item.label in [DocItemLabel.PICTURE, DocItemLabel.TABLE]: - matched_captions.update([caption.cref for caption in item.captions]) - - return all_captions - matched_captions - - # Initialization - output_parts: List[str] = [] - ordered_list_stack: List[bool] = [] - previous_level = 0 - previous_page_no = None - - # Precompute standalone captions - standalone_captions = _get_standalone_captions(self.body) - - # Begin document - output_parts.append(f"<{DocumentToken.DOCUMENT.value}>{delim}") - - for ix, (item, current_level) in enumerate( - self.iterate_items( - self.body, - with_groups=True, - included_content_layers={ - ContentLayer.BODY, - ContentLayer.FURNITURE, - }, - ) - ): - # Close lists if we've moved to a lower nesting level - if current_level < previous_level and ordered_list_stack: - ordered_list_stack = _close_lists( - current_level, - previous_level, - ordered_list_stack, - output_parts, - ) - previous_level = current_level - - # Skip items outside the specified element range - if ix < from_element or ix >= to_element: - continue - - # Skip items whose label is not in the allowed set - if isinstance(item, DocItem) and (item.label not in labels): - continue - - # Skip captions that are not standalone as they will be included below - # by the export functions of Table and Picture - if ( - isinstance(item, TextItem) - and item.label == DocItemLabel.CAPTION - and item.self_ref not in standalone_captions - ): - continue - - # Handle list groups - if isinstance(item, GroupItem): - if item.label == GroupLabel.ORDERED_LIST: - output_parts.append(f"<{DocumentToken.ORDERED_LIST.value}>{delim}") - ordered_list_stack.append(True) - elif item.label == GroupLabel.LIST: - output_parts.append( - f"<{DocumentToken.UNORDERED_LIST.value}>{delim}" - ) - ordered_list_stack.append(False) - continue - - # For other item types, optionally insert page-break if the page changed - output_parts, previous_page_no = _add_page_break_if_needed( - output_parts, item, previous_page_no, add_page_index - ) - - if isinstance(item, SectionHeaderItem): - output_parts.append( - item.export_to_document_tokens( - doc=self, - new_line=delim, - xsize=xsize, - ysize=ysize, - add_location=add_location, - add_content=add_content, - ) - ) - elif isinstance(item, CodeItem): - output_parts.append( - item.export_to_document_tokens( - doc=self, - new_line=delim, - xsize=xsize, - ysize=ysize, - add_location=add_location, - add_content=add_content, - ) - ) - elif isinstance(item, TextItem): - output_parts.append( - item.export_to_document_tokens( - doc=self, - new_line=delim, - xsize=xsize, - ysize=ysize, - add_location=add_location, - add_content=add_content, - ) - ) - elif isinstance(item, TableItem): - output_parts.append( - item.export_to_document_tokens( - doc=self, - new_line=delim, - xsize=xsize, - ysize=ysize, - 
add_location=add_location, - add_cell_location=add_table_cell_location, - add_cell_text=add_table_cell_text, - add_caption=True, - ) - ) - elif isinstance(item, PictureItem): - output_parts.append( - item.export_to_document_tokens( - doc=self, - new_line=delim, - xsize=xsize, - ysize=ysize, - add_caption=True, - add_location=add_location, - add_content=add_content, - ) - ) - - # End any lists that might still be open - ordered_list_stack = _close_lists( - 0, previous_level, ordered_list_stack, output_parts + from docling_core.experimental.serializer.doctags import ( + DocTagsDocSerializer, + DocTagsParams, ) - # End document - output_parts.append(f"") - - return "".join(output_parts) + serializer = DocTagsDocSerializer( + doc=self, + params=DocTagsParams( + labels=labels, + # layers=..., # not exposed + start_idx=from_element, + stop_idx=to_element, + new_line=delim, + xsize=xsize, + ysize=ysize, + add_location=add_location, + # add_caption=..., # not exposed + add_content=add_content, + add_page_break=add_page_index, + add_table_cell_location=add_table_cell_location, + add_table_cell_text=add_table_cell_text, + ), + ) + ser_res = serializer.serialize() + return ser_res.text def _export_to_indented_text( self, diff --git a/docling_core/types/doc/tokens.py b/docling_core/types/doc/tokens.py index 10ddb995..7c0bb034 100644 --- a/docling_core/types/doc/tokens.py +++ b/docling_core/types/doc/tokens.py @@ -8,10 +8,10 @@ from enum import Enum from typing import Tuple -from docling_core.types.doc.labels import PictureClassificationLabel +from docling_core.types.doc.labels import DocItemLabel -class TableToken(Enum): +class TableToken(str, Enum): """Class to represent an LLM friendly representation of a Table.""" CELL_LABEL_COLUMN_HEADER = "" @@ -41,41 +41,207 @@ def is_known_token(label): return label in TableToken.get_special_tokens() -class DocumentToken(Enum): +_LOC_PREFIX = "loc_" +_SECTION_HEADER_PREFIX = "section_header_level_" + + +class _PictureClassificationToken(str, Enum): + """PictureClassificationToken.""" + + OTHER = "" + + # If more than one picture is grouped together, it + # is generally not possible to assign a label + PICTURE_GROUP = "" + + # General + PIE_CHART = "" + BAR_CHART = "" + LINE_CHART = "" + FLOW_CHART = "" + SCATTER_CHART = "" + HEATMAP = "" + REMOTE_SENSING = "" + + NATURAL_IMAGE = "" + + # Chemistry + MOLECULAR_STRUCTURE = "" + MARKUSH_STRUCTURE = "" + + # Company + ICON = "" + LOGO = "" + SIGNATURE = "" + STAMP = "" + QR_CODE = "" + BAR_CODE = "" + SCREENSHOT = "" + + # Geology/Geography + GEOGRAPHIC_MAP = "" + STRATIGRAPHIC_CHART = "" + + # Engineering + CAD_DRAWING = "" + ELECTRICAL_DIAGRAM = "" + + +class _CodeLanguageToken(str, Enum): + """CodeLanguageToken.""" + + ADA = "<_Ada_>" + AWK = "<_Awk_>" + BASH = "<_Bash_>" + BC = "<_bc_>" + C = "<_C_>" + C_SHARP = "<_C#_>" + C_PLUS_PLUS = "<_C++_>" + CMAKE = "<_CMake_>" + COBOL = "<_COBOL_>" + CSS = "<_CSS_>" + CEYLON = "<_Ceylon_>" + CLOJURE = "<_Clojure_>" + CRYSTAL = "<_Crystal_>" + CUDA = "<_Cuda_>" + CYTHON = "<_Cython_>" + D = "<_D_>" + DART = "<_Dart_>" + DC = "<_dc_>" + DOCKERFILE = "<_Dockerfile_>" + ELIXIR = "<_Elixir_>" + ERLANG = "<_Erlang_>" + FORTRAN = "<_FORTRAN_>" + FORTH = "<_Forth_>" + GO = "<_Go_>" + HTML = "<_HTML_>" + HASKELL = "<_Haskell_>" + HAXE = "<_Haxe_>" + JAVA = "<_Java_>" + JAVASCRIPT = "<_JavaScript_>" + JULIA = "<_Julia_>" + KOTLIN = "<_Kotlin_>" + LISP = "<_Lisp_>" + LUA = "<_Lua_>" + MATLAB = "<_Matlab_>" + MOONSCRIPT = "<_MoonScript_>" + NIM = "<_Nim_>" + OCAML = "<_OCaml_>" + 
OBJECTIVEC = "<_ObjectiveC_>" + OCTAVE = "<_Octave_>" + PHP = "<_PHP_>" + PASCAL = "<_Pascal_>" + PERL = "<_Perl_>" + PROLOG = "<_Prolog_>" + PYTHON = "<_Python_>" + RACKET = "<_Racket_>" + RUBY = "<_Ruby_>" + RUST = "<_Rust_>" + SML = "<_SML_>" + SQL = "<_SQL_>" + SCALA = "<_Scala_>" + SCHEME = "<_Scheme_>" + SWIFT = "<_Swift_>" + TYPESCRIPT = "<_TypeScript_>" + UNKNOWN = "<_unknown_>" + VISUALBASIC = "<_VisualBasic_>" + XML = "<_XML_>" + YAML = "<_YAML_>" + + +class DocumentToken(str, Enum): """Class to represent an LLM friendly representation of a Document.""" DOCUMENT = "doctag" OTSL = "otsl" ORDERED_LIST = "ordered_list" UNORDERED_LIST = "unordered_list" - LOC = "loc_" PAGE_BREAK = "page_break" SMILES = "smiles" + INLINE = "inline" + + CAPTION = "caption" + FOOTNOTE = "footnote" + FORMULA = "formula" + LIST_ITEM = "list_item" + PAGE_FOOTER = "page_footer" + PAGE_HEADER = "page_header" + PICTURE = "picture" + TABLE = "table" + TEXT = "text" + TITLE = "title" + DOCUMENT_INDEX = "document_index" + CODE = "code" + CHECKBOX_SELECTED = "checkbox_selected" + CHECKBOX_UNSELECTED = "checkbox_unselected" + FORM = "form" + KEY_VALUE_REGION = "key_value_region" + + PARAGRAPH = "paragraph" + REFERENCE = "reference" @classmethod def get_special_tokens( cls, - page_dimension: Tuple[int, int] = (100, 100), + page_dimension: Tuple[int, int] = (500, 500), ): """Function to get all special document tokens.""" - special_tokens = [token.value for token in cls] + special_tokens: list[str] = [] + for token in cls: + special_tokens.append(f"<{token.value}>") + special_tokens.append(f"") for i in range(6): special_tokens += [ - f"", - f"", + f"<{_SECTION_HEADER_PREFIX}{i}>", + f"", ] - # Add dynamically picture classification tokens - for _, member in PictureClassificationLabel.__members__.items(): - special_tokens.append(f"<{member}>") + special_tokens.extend([t.value for t in _PictureClassificationToken]) + special_tokens.extend([t.value for t in _CodeLanguageToken]) + + special_tokens.extend(TableToken.get_special_tokens()) # Adding dynamically generated location-tokens - for i in range(0, max(page_dimension[0] + 1, page_dimension[1] + 1)): - special_tokens.append(f"") + for i in range(0, max(page_dimension[0], page_dimension[1])): + special_tokens.append(f"<{_LOC_PREFIX}{i}>") return special_tokens + @classmethod + def create_token_name_from_doc_item_label(cls, label: str, level: int = 1) -> str: + """Get token corresponding to passed doc item label.""" + doc_token_by_item_label = { + DocItemLabel.CAPTION: DocumentToken.CAPTION, + DocItemLabel.FOOTNOTE: DocumentToken.FOOTNOTE, + DocItemLabel.FORMULA: DocumentToken.FORMULA, + DocItemLabel.LIST_ITEM: DocumentToken.LIST_ITEM, + DocItemLabel.PAGE_FOOTER: DocumentToken.PAGE_FOOTER, + DocItemLabel.PAGE_HEADER: DocumentToken.PAGE_HEADER, + DocItemLabel.PICTURE: DocumentToken.PICTURE, + DocItemLabel.TABLE: DocumentToken.TABLE, + DocItemLabel.TEXT: DocumentToken.TEXT, + DocItemLabel.TITLE: DocumentToken.TITLE, + DocItemLabel.DOCUMENT_INDEX: DocumentToken.DOCUMENT_INDEX, + DocItemLabel.CODE: DocumentToken.CODE, + DocItemLabel.CHECKBOX_SELECTED: DocumentToken.CHECKBOX_SELECTED, + DocItemLabel.CHECKBOX_UNSELECTED: DocumentToken.CHECKBOX_UNSELECTED, + DocItemLabel.FORM: DocumentToken.FORM, + DocItemLabel.KEY_VALUE_REGION: DocumentToken.KEY_VALUE_REGION, + DocItemLabel.PARAGRAPH: DocumentToken.PARAGRAPH, + DocItemLabel.REFERENCE: DocumentToken.REFERENCE, + } + + res: str + if label == DocItemLabel.SECTION_HEADER: + res = f"{_SECTION_HEADER_PREFIX}{level}" + else: + 
try: + res = doc_token_by_item_label[DocItemLabel(label)].value + except KeyError as e: + raise RuntimeError(f"Unexpected DocItemLabel: {label}") from e + return res + @staticmethod def is_known_token(label): """Function to check if label is in tokens.""" @@ -83,29 +249,29 @@ def is_known_token(label): @staticmethod def get_picture_classification_token(classification: str) -> str: - """Function to get picture classification tokens.""" - return f"<{classification}>" + """Function to get the token for a given picture classification value.""" + return _PictureClassificationToken(f"<{classification}>").value + + @staticmethod + def get_code_language_token(code_language: str) -> str: + """Function to get the token for a given code language.""" + return _CodeLanguageToken(f"<_{code_language}_>").value @staticmethod - def get_location_token(val: float, rnorm: int = 100): + def get_location_token(val: float, rnorm: int = 500): # TODO review """Function to get location tokens.""" val_ = round(rnorm * val) - - if val_ < 0: - return "" - - if val_ > rnorm: - return f"" - - return f"" + val_ = max(val_, 0) + val_ = min(val_, rnorm - 1) + return f"<{_LOC_PREFIX}{val_}>" @staticmethod def get_location( bbox: tuple[float, float, float, float], page_w: float, page_h: float, - xsize: int = 100, - ysize: int = 100, + xsize: int = 500, # TODO review + ysize: int = 500, # TODO review ): """Get the location string give bbox and page-dim.""" assert bbox[0] <= bbox[2], f"bbox[0]<=bbox[2] => {bbox[0]}<={bbox[2]}" diff --git a/test/data/doc/2206.01062.yaml.dt b/test/data/doc/2206.01062.yaml.dt index 95b52cc7..b00a5cc9 100644 --- a/test/data/doc/2206.01062.yaml.dt +++ b/test/data/doc/2206.01062.yaml.dt @@ -82,7 +82,6 @@ Phase 3: Training. After a first trial with a small group of people, we realised that providing the annotation guideline and a set of random practice pages did not yield the desired quality level for layout annotation. Therefore we prepared a subset of pages with two different complexity levels, each with a practice and an exam part. 974 pages were reference-annotated by one proficient core team member. Annotation staff were then given the task to annotate the same subsets (blinded from the reference). By comparing the annotations of each staff member with the reference annotations, we could quantify how closely their annotations matched the reference. Only after passing two exam levels with high annotation quality, staff were admitted into the production phase. Practice iterations 05237a14f2524e3f53c8454b074409d05078038a6a36b770fcc8ec7e540deae0 -Figure 4: Examples of plausible annotation alternatives for the same page. Criteria in our annotation guideline can resolve cases A to C, while the case D remains ambiguous. were carried out over a timeframe of 12 weeks, after which 8 of the 40 initially allocated annotators did not pass the bar. Phase 4: Production annotation. The previously selected 80K pages were annotated with the defined 11 class labels by 32 annotators. This production phase took around three months to complete. All annotations were created online through CCS, which visualises the programmatic PDF text-cells as an overlay on the page. The page annotation are obtained by drawing rectangular bounding-boxes, as shown in Figure 3. With regard to the annotation practices, we implemented a few constraints and capabilities on the tooling level. First, we only allow non-overlapping, vertically oriented, rectangular boxes. 
For the large majority of documents, this constraint was sufficient and it speeds up the annotation considerably in comparison with arbitrary segmentation shapes. Second, annotator staff were not able to see each other's annotations. This was enforced by design to avoid any bias in the annotation, which could skew the numbers of the inter-annotator agreement (see Table 1). We wanted diff --git a/test/data/doc/2206.01062.yaml.md b/test/data/doc/2206.01062.yaml.md index d388b69c..c6d3abe1 100644 --- a/test/data/doc/2206.01062.yaml.md +++ b/test/data/doc/2206.01062.yaml.md @@ -53,6 +53,8 @@ In this paper, we present the DocLayNet dataset. It provides pageby-page layout - (3) Detailed Label Set : We define 11 class labels to distinguish layout features in high detail. PubLayNet provides 5 labels; DocBank provides 13, although not a superset of ours. - (4) Redundant Annotations : A fraction of the pages in the DocLayNet data set carry more than one human annotation. +1 https://developer.ibm.com/exchanges/data/all/doclaynet + This enables experimentation with annotation uncertainty and quality control analysis. - (5) Pre-defined Train-, Test- & Validation-set : Like DocBank, we provide fixed train-, test- & validation-sets to ensure proportional representation of the class-labels. Further, we prevent leakage of unique layouts across sets, which has a large effect on model accuracy scores. @@ -85,6 +87,8 @@ We did not control the document selection with regard to language. The vast majo To ensure that future benchmarks in the document-layout analysis community can be easily compared, we have split up DocLayNet into pre-defined train-, test- and validation-sets. In this way, we can avoid spurious variations in the evaluation scores due to random splitting in train-, test- and validation-sets. We also ensured that less frequent labels are represented in train and test sets in equal proportions. +2 e.g. AAPL from https://www.annualreports.com/ + Table 1 shows the overall frequency and distribution of the labels among the different sets. Importantly, we ensure that subsets are only split on full-document boundaries. This avoids that pages of the same document are spread over train, test and validation set, which can give an undesired evaluation advantage to models and lead to overestimation of their prediction accuracy. We will show the impact of this decision in Section 5. In order to accommodate the different types of models currently in use by the community, we provide DocLayNet in an augmented COCO format [16]. This entails the standard COCO ground-truth file (in JSON format) with the associated page images (in PNG format, 1025 × 1025 pixels). Furthermore, custom fields have been added to each COCO record to specify document category, original document filename and page number. In addition, we also provide the original PDF pages, as well as sidecar files containing parsed PDF text and text-cell coordinates (in JSON). All additional files are linked to the primary page images by their matching filenames. @@ -125,6 +129,8 @@ Preparation work included uploading and parsing the sourced PDF documents in the Phase 2: Label selection and guideline. We reviewed the collected documents and identified the most common structural features they exhibit. This was achieved by identifying recurrent layout elements and lead us to the definition of 11 distinct class labels. 
These 11 class labels are Caption , Footnote , Formula List-item , , Pagefooter , Page-header , Picture , Section-header , Table , Text , and Title . Critical factors that were considered for the choice of these class labels were (1) the overall occurrence of the label, (2) the specificity of the label, (3) recognisability on a single page (i.e. no need for context from previous or next page) and (4) overall coverage of the page. Specificity ensures that the choice of label is not ambiguous, while coverage ensures that all meaningful items on a page can be annotated. We refrained from class labels that are very specific to a document category, such as Abstract in the Scientific Articles category. We also avoided class labels that are tightly linked to the semantics of the text. Labels such as Author and Affiliation , as seen in DocBank, are often only distinguishable by discriminating on +3 https://arxiv.org/ + the textual content of an element, which goes beyond visual layout recognition, in particular outside the Scientific Articles category. At first sight, the task of visual document-layout interpretation appears intuitive enough to obtain plausible annotations in most cases. However, during early trial-runs in the core team, we observed many cases in which annotators use different annotation styles, especially for documents with challenging layouts. For example, if a figure is presented with subfigures, one annotator might draw a single figure bounding-box, while another might annotate each subfigure separately. The same applies for lists, where one might annotate all list items in one block or each list item separately. In essence, we observed that challenging layouts would be annotated in different but plausible ways. To illustrate this, we show in Figure 4 multiple examples of plausible but inconsistent annotations on the same pages. diff --git a/test/data/doc/2206.01062.yaml.min.dt b/test/data/doc/2206.01062.yaml.min.dt new file mode 100644 index 00000000..f30e1810 --- /dev/null +++ b/test/data/doc/2206.01062.yaml.min.dt @@ -0,0 +1 @@ +arXiv:2206.01062v1 [cs.CV] 2 Jun 2022DocLayNet: A Large Human-Annotated Dataset for Document-Layout AnalysisBirgit Pfitzmann IBM Research Rueschlikon, Switzerland bpf@zurich.ibm.comChristoph Auer IBM Research Rueschlikon, Switzerland cau@zurich.ibm.comMichele Dolfi IBM Research Rueschlikon, Switzerland dol@zurich.ibm.comAhmed S. Nassar IBM Research Rueschlikon, Switzerland ahn@zurich.ibm.comPeter Staar IBM Research Rueschlikon, Switzerland taa@zurich.ibm.comABSTRACTAccurate document layout analysis is a key requirement for highquality PDF document conversion. With the recent availability of public, large ground-truth datasets such as PubLayNet and DocBank, deep-learning models have proven to be very effective at layout detection and segmentation. While these datasets are of adequate size to train such models, they severely lack in layout variability since they are sourced from scientific article repositories such as PubMed and arXiv only. Consequently, the accuracy of the layout segmentation drops significantly when these models are applied on more challenging and diverse layouts. In this paper, we present DocLayNet , a new, publicly available, document-layout annotation dataset in COCO format. It contains 80863 manually annotated pages from diverse data sources to represent a wide variability in layouts. For each PDF page, the layout annotations provide labelled bounding-boxes with a choice of 11 distinct classes. 
DocLayNet also provides a subset of double- and triple-annotated pages to determine the inter-annotator agreement. In multiple experiments, we provide baseline accuracy scores (in mAP) for a set of popular object detection models. We also demonstrate that these models fall approximately 10% behind the inter-annotator agreement. Furthermore, we provide evidence that DocLayNet is of sufficient size. Lastly, we compare models trained on PubLayNet, DocBank and DocLayNet, showing that layout predictions of the DocLayNettrained models are more robust and thus the preferred choice for general-purpose document-layout analysis.CCS CONCEPTS· Informationsystems → Documentstructure ; · Appliedcomputing → Document analysis ; · Computing methodologies → Machine learning Computer vision ; ; Object detection ;Permission to make digital or hard copies of part or all of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for third-party components of this work must be honored. For all other uses, contact the owner/author(s).KDD '22, August 14-18, 2022, Washington, DC, USA© 2022 Copyright held by the owner/author(s).ACM ISBN 978-1-4503-9385-0/22/08.https://doi.org/10.1145/3534678.3539043Figure 1: Four examples of complex page layouts across different document categoriesKEYWORDSPDF document conversion, layout segmentation, object-detection, data set, Machine LearningACMReference Format:Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar. 2022. DocLayNet: A Large Human-Annotated Dataset for DocumentLayout Analysis. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '22), August 14-18, 2022, Washington, DC, USA. ACM, New York, NY, USA, 9 pages. https://doi.org/10.1145/ 3534678.3539043KDD '22, August 14-18, 2022, Washington, DC, USA Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar1 INTRODUCTIONDespite the substantial improvements achieved with machine-learning (ML) approaches and deep neural networks in recent years, document conversion remains a challenging problem, as demonstrated by the numerous public competitions held on this topic [1-4]. The challenge originates from the huge variability in PDF documents regarding layout, language and formats (scanned, programmatic or a combination of both). Engineering a single ML model that can be applied on all types of documents and provides high-quality layout segmentation remains to this day extremely challenging [5]. To highlight the variability in document layouts, we show a few example documents from the DocLayNet dataset in Figure 1.Akeyproblem in the process of document conversion is to understand the structure of a single document page, i.e. which segments of text should be grouped together in a unit. To train models for this task, there are currently two large datasets available to the community, PubLayNet [6] and DocBank [7]. They were introduced in 2019 and 2020 respectively and significantly accelerated the implementation of layout detection and segmentation models due to their sizes of 300K and 500K ground-truth pages. These sizes were achieved by leveraging an automation approach. The benefit of automated ground-truth generation is obvious: one can generate large ground-truth datasets at virtually no cost. 
However, the automation introduces a constraint on the variability in the dataset, because corresponding structured source data must be available. PubLayNet and DocBank were both generated from scientific document repositories (PubMed and arXiv), which provide XML or L A T E X sources. Those scientific documents present a limited variability in their layouts, because they are typeset in uniform templates provided by the publishers. Obviously, documents such as technical manuals, annual company reports, legal text, government tenders, etc. have very different and partially unique layouts. As a consequence, the layout predictions obtained from models trained on PubLayNet or DocBank is very reasonable when applied on scientific documents. However, for more artistic or free-style layouts, we see sub-par prediction quality from these models, which we demonstrate in Section 5.In this paper, we present the DocLayNet dataset. It provides pageby-page layout annotation ground-truth using bounding-boxes for 11 distinct class labels on 80863 unique document pages, of which a fraction carry double- or triple-annotations. DocLayNet is similar in spirit to PubLayNet and DocBank and will likewise be made available to the public 1 in order to stimulate the document-layout analysis community. It distinguishes itself in the following aspects:(1) Human Annotation : In contrast to PubLayNet and DocBank, we relied on human annotation instead of automation approaches to generate the data set.(2) Large Layout Variability : We include diverse and complex layouts from a large variety of public sources.(3) Detailed Label Set : We define 11 class labels to distinguish layout features in high detail. PubLayNet provides 5 labels; DocBank provides 13, although not a superset of ours.(4) Redundant Annotations : A fraction of the pages in the DocLayNet data set carry more than one human annotation.1 https://developer.ibm.com/exchanges/data/all/doclaynetThis enables experimentation with annotation uncertainty and quality control analysis.(5) Pre-defined Train-, Test- & Validation-set : Like DocBank, we provide fixed train-, test- & validation-sets to ensure proportional representation of the class-labels. Further, we prevent leakage of unique layouts across sets, which has a large effect on model accuracy scores.All aspects outlined above are detailed in Section 3. In Section 4, we will elaborate on how we designed and executed this large-scale human annotation campaign. We will also share key insights and lessons learned that might prove helpful for other parties planning to set up annotation campaigns.In Section 5, we will present baseline accuracy numbers for a variety of object detection methods (Faster R-CNN, Mask R-CNN and YOLOv5) trained on DocLayNet. We further show how the model performance is impacted by varying the DocLayNet dataset size, reducing the label set and modifying the train/test-split. Last but not least, we compare the performance of models trained on PubLayNet, DocBank and DocLayNet and demonstrate that a model trained on DocLayNet provides overall more robust layout recovery.2 RELATED WORKWhile early approaches in document-layout analysis used rulebased algorithms and heuristics [8], the problem is lately addressed with deep learning methods. The most common approach is to leverage object detection models [9-15]. In the last decade, the accuracy and speed of these models has increased dramatically. 
Furthermore, most state-of-the-art object detection methods can be trained and applied with very little work, thanks to a standardisation effort of the ground-truth data format [16] and common deep-learning frameworks [17]. Reference data sets such as PubLayNet [6] and DocBank provide their data in the commonly accepted COCO format [16].Lately, new types of ML models for document-layout analysis have emerged in the community [18-21]. These models do not approach the problem of layout analysis purely based on an image representation of the page, as computer vision methods do. Instead, they combine the text tokens and image representation of a page in order to obtain a segmentation. While the reported accuracies appear to be promising, a broadly accepted data format which links geometric and textual features has yet to establish.3 THE DOCLAYNET DATASETDocLayNet contains 80863 PDF pages. Among these, 7059 carry two instances of human annotations, and 1591 carry three. This amounts to 91104 total annotation instances. The annotations provide layout information in the shape of labeled, rectangular boundingboxes. We define 11 distinct labels for layout features, namely Caption , Footnote , Formula List-item , , Page-footer , Page-header , Picture , Section-header , Table , Text , and Title . Our reasoning for picking this particular label set is detailed in Section 4.In addition to open intellectual property constraints for the source documents, we required that the documents in DocLayNet adhere to a few conditions. Firstly, we kept scanned documentsDocLayNet: A Large Human-Annotated Dataset for Document-Layout AnalysisKDD '22, August 14-18, 2022, Washington, DC, USAFigure 2: Distribution of DocLayNet pages across document categories.to a minimum, since they introduce difficulties in annotation (see Section 4). As a second condition, we focussed on medium to large documents ( > 10 pages) with technical content, dense in complex tables, figures, plots and captions. Such documents carry a lot of information value, but are often hard to analyse with high accuracy due to their challenging layouts. Counterexamples of documents not included in the dataset are receipts, invoices, hand-written documents or photographs showing 'text in the wild".The pages in DocLayNet can be grouped into six distinct categories, namely Financial Reports , Manuals Scientific Articles , , Laws & Regulations , Patents and Government Tenders . Each document category was sourced from various repositories. For example, Financial Reports contain both free-style format annual reports 2 which expose company-specific, artistic layouts as well as the more formal SEC filings. The two largest categories ( Financial Reports and Manuals ) contain a large amount of free-style layouts in order to obtain maximum variability. In the other four categories, we boosted the variability by mixing documents from independent providers, such as different government websites or publishers. In Figure 2, we show the document categories contained in DocLayNet with their respective sizes.We did not control the document selection with regard to language. The vast majority of documents contained in DocLayNet (close to 95%) are published in English language. However, DocLayNet also contains a number of documents in other languages such as German (2.5%), French (1.0%) and Japanese (1.0%). 
While the document language has negligible impact on the performance of computer vision methods such as object detection and segmentation models, it might prove challenging for layout analysis methods which exploit textual features.To ensure that future benchmarks in the document-layout analysis community can be easily compared, we have split up DocLayNet into pre-defined train-, test- and validation-sets. In this way, we can avoid spurious variations in the evaluation scores due to random splitting in train-, test- and validation-sets. We also ensured that less frequent labels are represented in train and test sets in equal proportions.2 e.g. AAPL from https://www.annualreports.com/Table 1 shows the overall frequency and distribution of the labels among the different sets. Importantly, we ensure that subsets are only split on full-document boundaries. This avoids that pages of the same document are spread over train, test and validation set, which can give an undesired evaluation advantage to models and lead to overestimation of their prediction accuracy. We will show the impact of this decision in Section 5.In order to accommodate the different types of models currently in use by the community, we provide DocLayNet in an augmented COCO format [16]. This entails the standard COCO ground-truth file (in JSON format) with the associated page images (in PNG format, 1025 × 1025 pixels). Furthermore, custom fields have been added to each COCO record to specify document category, original document filename and page number. In addition, we also provide the original PDF pages, as well as sidecar files containing parsed PDF text and text-cell coordinates (in JSON). All additional files are linked to the primary page images by their matching filenames.Despite being cost-intense and far less scalable than automation, human annotation has several benefits over automated groundtruth generation. The first and most obvious reason to leverage human annotations is the freedom to annotate any type of document without requiring a programmatic source. For most PDF documents, the original source document is not available. The latter is not a hard constraint with human annotation, but it is for automated methods. A second reason to use human annotations is that the latter usually provide a more natural interpretation of the page layout. The human-interpreted layout can significantly deviate from the programmatic layout used in typesetting. For example, 'invisible' tables might be used solely for aligning text paragraphs on columns. Such typesetting tricks might be interpreted by automated methods incorrectly as an actual table, while the human annotation will interpret it correctly as Text or other styles. The same applies to multi-line text elements, when authors decided to space them as 'invisible' list elements without bullet symbols. A third reason to gather ground-truth through human annotation is to estimate a 'natural' upper bound on the segmentation accuracy. As we will show in Section 4, certain documents featuring complex layouts can have different but equally acceptable layout interpretations. This natural upper bound for segmentation accuracy can be found by annotating the same pages multiple times by different people and evaluating the inter-annotator agreement. Such a baseline consistency evaluation is very useful to define expectations for a good target accuracy in trained deep neural network models and avoid overfitting (see Table 1). 
On the flip side, achieving high annotation consistency proved to be a key challenge in human annotation, as we outline in Section 4.4 ANNOTATION CAMPAIGNThe annotation campaign was carried out in four phases. In phase one, we identified and prepared the data sources for annotation. In phase two, we determined the class labels and how annotations should be done on the documents in order to obtain maximum consistency. The latter was guided by a detailed requirement analysis and exhaustive experiments. In phase three, we trained the annotation staff and performed exams for quality assurance. In phase four,KDD '22, August 14-18, 2022, Washington, DC, USA Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter Staar% of Totaltriple inter-annotator mAP @ 0.5-0.95 (%)class labelCountTrainTestValAllFinManSciLawPatTenCaption225242.041.772.3284-8940-6186-9294-9995-9969-78n/aFootnote63180.600.310.5883-91n/a10062-8885-94n/a82-97Formula250272.251.902.9683-85n/an/a84-8786-96n/an/aList-item18566017.1913.3415.8287-8874-8390-9297-9781-8575-8893-95Page-footer708786.515.586.0093-9488-9095-9610092-9710096-98Page-header580225.106.705.0685-8966-7690-9498-10091-9297-9981-86Picture459764.212.785.3169-7156-5982-8669-8280-9566-7159-76Section-header14288412.6015.7712.8583-8476-8190-9294-9587-9469-7378-86Table347333.202.273.6077-8175-8083-8698-9958-8079-8470-85Text51037745.8249.2845.0084-8681-8688-9389-9387-9271-7987-95Title50710.470.300.5060-7224-6350-6394-10082-9668-7924-56Total1107470941123998166653182-8371-7479-8189-9486-9171-7668-85Table 1: DocLayNet dataset overview. Along with the frequency of each class label, we present the relative occurrence (as % of row 'Total') in the train, test and validation sets. The inter-annotator agreement is computed as the mAP@0.5-0.95 metric between pairwise annotations from the triple-annotated pages, from which we obtain accuracy ranges.Figure 3: Corpus Conversion Service annotation user interface. The PDF page is shown in the background, with overlaid text-cells (in darker shades). The annotation boxes can be drawn by dragging a rectangle over each segment with the respective label from the palette on the right.we distributed the annotation workload and performed continuous quality controls. Phase one and two required a small team of experts only. For phases three and four, a group of 40 dedicated annotators were assembled and supervised.Phase 1: Data selection and preparation. Our inclusion criteria for documents were described in Section 3. A large effort went into ensuring that all documents are free to use. The data sources include publication repositories such as arXiv 3 , government offices, company websites as well as data directory services for financial reports and patents. Scanned documents were excluded wherever possible because they can be rotated or skewed. This would not allow us to perform annotation with rectangular bounding-boxes and therefore complicate the annotation process.Preparation work included uploading and parsing the sourced PDF documents in the Corpus Conversion Service (CCS) [22], a cloud-native platform which provides a visual annotation interface and allows for dataset inspection and analysis. The annotation interface of CCS is shown in Figure 3. The desired balance of pages between the different document categories was achieved by selective subsampling of pages with certain desired properties. 
For example, we made sure to include the title page of each document and bias the remaining page selection to those with figures or tables. The latter was achieved by leveraging pre-trained object detection models from PubLayNet, which helped us estimate how many figures and tables a given page contains.Phase 2: Label selection and guideline. We reviewed the collected documents and identified the most common structural features they exhibit. This was achieved by identifying recurrent layout elements and lead us to the definition of 11 distinct class labels. These 11 class labels are Caption , Footnote , Formula List-item , , Pagefooter , Page-header , Picture , Section-header , Table , Text , and Title . Critical factors that were considered for the choice of these class labels were (1) the overall occurrence of the label, (2) the specificity of the label, (3) recognisability on a single page (i.e. no need for context from previous or next page) and (4) overall coverage of the page. Specificity ensures that the choice of label is not ambiguous, while coverage ensures that all meaningful items on a page can be annotated. We refrained from class labels that are very specific to a document category, such as Abstract in the Scientific Articles category. We also avoided class labels that are tightly linked to the semantics of the text. Labels such as Author and Affiliation , as seen in DocBank, are often only distinguishable by discriminating on3 https://arxiv.org/DocLayNet: A Large Human-Annotated Dataset for Document-Layout AnalysisKDD '22, August 14-18, 2022, Washington, DC, USAthe textual content of an element, which goes beyond visual layout recognition, in particular outside the Scientific Articles category.At first sight, the task of visual document-layout interpretation appears intuitive enough to obtain plausible annotations in most cases. However, during early trial-runs in the core team, we observed many cases in which annotators use different annotation styles, especially for documents with challenging layouts. For example, if a figure is presented with subfigures, one annotator might draw a single figure bounding-box, while another might annotate each subfigure separately. The same applies for lists, where one might annotate all list items in one block or each list item separately. In essence, we observed that challenging layouts would be annotated in different but plausible ways. To illustrate this, we show in Figure 4 multiple examples of plausible but inconsistent annotations on the same pages.Obviously, this inconsistency in annotations is not desirable for datasets which are intended to be used for model training. To minimise these inconsistencies, we created a detailed annotation guideline. While perfect consistency across 40 annotation staff members is clearly not possible to achieve, we saw a huge improvement in annotation consistency after the introduction of our annotation guideline. A few selected, non-trivial highlights of the guideline are:(1) Every list-item is an individual object instance with class label List-item . This definition is different from PubLayNet and DocBank, where all list-items are grouped together into one List object.(2) A List-item is a paragraph with hanging indentation. Singleline elements can qualify as List-item if the neighbour elements expose hanging indentation. 
Bullet or enumeration symbols are not a requirement.(3) For every Caption , there must be exactly one corresponding Picture or Table .(4) Connected sub-pictures are grouped together in one Picture object.(5) Formula numbers are included in a Formula object.(6) Emphasised text (e.g. in italic or bold) at the beginning of a paragraph is not considered a Section-header , unless it appears exclusively on its own line.The complete annotation guideline is over 100 pages long and a detailed description is obviously out of scope for this paper. Nevertheless, it will be made publicly available alongside with DocLayNet for future reference.Phase 3: Training. After a first trial with a small group of people, we realised that providing the annotation guideline and a set of random practice pages did not yield the desired quality level for layout annotation. Therefore we prepared a subset of pages with two different complexity levels, each with a practice and an exam part. 974 pages were reference-annotated by one proficient core team member. Annotation staff were then given the task to annotate the same subsets (blinded from the reference). By comparing the annotations of each staff member with the reference annotations, we could quantify how closely their annotations matched the reference. Only after passing two exam levels with high annotation quality, staff were admitted into the production phase. Practice iterations05237a14f2524e3f53c8454b074409d05078038a6a36b770fcc8ec7e540deae0were carried out over a timeframe of 12 weeks, after which 8 of the 40 initially allocated annotators did not pass the bar.Phase 4: Production annotation. The previously selected 80K pages were annotated with the defined 11 class labels by 32 annotators. This production phase took around three months to complete. All annotations were created online through CCS, which visualises the programmatic PDF text-cells as an overlay on the page. The page annotation are obtained by drawing rectangular bounding-boxes, as shown in Figure 3. With regard to the annotation practices, we implemented a few constraints and capabilities on the tooling level. First, we only allow non-overlapping, vertically oriented, rectangular boxes. For the large majority of documents, this constraint was sufficient and it speeds up the annotation considerably in comparison with arbitrary segmentation shapes. Second, annotator staff were not able to see each other's annotations. This was enforced by design to avoid any bias in the annotation, which could skew the numbers of the inter-annotator agreement (see Table 1). We wantedKDD '22, August 14-18, 2022, Washington, DC, USA Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter StaarTable 2: Prediction performance (mAP@0.5-0.95) of object detection networks on DocLayNet test set. The MRCNN (Mask R-CNN) and FRCNN (Faster R-CNN) models with ResNet-50 or ResNet-101 backbone were trained based on the network architectures from the detectron2 model zoo (Mask R-CNN R50, R101-FPN 3x, Faster R-CNN R101-FPN 3x), with default configurations. The YOLO implementation utilized was YOLOv5x6 [13]. 
All models were initialised using pre-trained weights from the COCO 2017 dataset.humanMRCNNFRCNNYOLOR50R101R101v5x6Caption84-8968.471.570.177.7Footnote83-9170.971.873.777.2Formula83-8560.163.463.566.2List-item87-8881.280.881.086.2Page-footer93-9461.659.358.961.1Page-header85-8971.970.072.067.9Picture69-7171.772.772.077.1Section-header83-8467.669.368.474.6Table77-8182.282.982.286.3Text84-8684.685.885.488.1Title60-7276.780.479.982.7All82-8372.473.573.476.8to avoid this at any cost in order to have clear, unbiased baseline numbers for human document-layout annotation. Third, we introduced the feature of snapping boxes around text segments to obtain a pixel-accurate annotation and again reduce time and effort. The CCS annotation tool automatically shrinks every user-drawn box to the minimum bounding-box around the enclosed text-cells for all purely text-based segments, which excludes only Table and Picture . For the latter, we instructed annotation staff to minimise inclusion of surrounding whitespace while including all graphical lines. A downside of snapping boxes to enclosed text cells is that some wrongly parsed PDF pages cannot be annotated correctly and need to be skipped. Fourth, we established a way to flag pages as rejected for cases where no valid annotation according to the label guidelines could be achieved. Example cases for this would be PDF pages that render incorrectly or contain layouts that are impossible to capture with non-overlapping rectangles. Such rejected pages are not contained in the final dataset. With all these measures in place, experienced annotation staff managed to annotate a single page in a typical timeframe of 20s to 60s, depending on its complexity.5 EXPERIMENTSThe primary goal of DocLayNet is to obtain high-quality ML models capable of accurate document-layout analysis on a wide variety of challenging layouts. As discussed in Section 2, object detection models are currently the easiest to use, due to the standardisation of ground-truth data in COCO format [16] and the availability of general frameworks such as detectron2 [17]. Furthermore, baseline numbers in PubLayNet and DocBank were obtained using standard object detection models such as Mask R-CNN and Faster R-CNN. As such, we will relate to these object detection methods in thisFigure 5: Prediction performance (mAP@0.5-0.95) of a Mask R-CNNnetworkwithResNet50backbonetrainedonincreasing fractions of the DocLayNet dataset. The learning curve flattens around the 80% mark, indicating that increasing the size of the DocLayNet dataset with similar data will not yield significantly better predictions.paper and leave the detailed evaluation of more recent methods mentioned in Section 2 for future work.In this section, we will present several aspects related to the performance of object detection models on DocLayNet. Similarly as in PubLayNet, we will evaluate the quality of their predictions using mean average precision (mAP) with 10 overlaps that range from 0.5 to 0.95 in steps of 0.05 (mAP@0.5-0.95). These scores are computed by leveraging the evaluation code provided by the COCO API [16].Baselines for Object DetectionIn Table 2, we present baseline experiments (given in mAP) on Mask R-CNN [12], Faster R-CNN [11], and YOLOv5 [13]. Both training and evaluation were performed on RGB images with dimensions of 1025 × 1025 pixels. For training, we only used one annotation in case of redundantly annotated pages. 
As one can observe, the variation in mAP between the models is rather low, but overall between 6 and 10% lower than the mAP computed from the pairwise human annotations on triple-annotated pages. This gives a good indication that the DocLayNet dataset poses a worthwhile challenge for the research community to close the gap between human recognition and ML approaches. It is interesting to see that Mask R-CNN and Faster R-CNN produce very comparable mAP scores, indicating that pixel-based image segmentation derived from bounding-boxes does not help to obtain better predictions. On the other hand, the more recent Yolov5x model does very well and even out-performs humans on selected labels such as Text , Table and Picture . This is not entirely surprising, as Text , Table and Picture are abundant and the most visually distinctive in a document.DocLayNet: A Large Human-Annotated Dataset for Document-Layout AnalysisKDD '22, August 14-18, 2022, Washington, DC, USATable 3: Performance of a Mask R-CNN R50 network in mAP@0.5-0.95 scores trained on DocLayNet with different class label sets. The reduced label sets were obtained by either down-mapping or dropping labels.Class-count11654Caption68TextTextTextFootnote71TextTextTextFormula60TextTextTextList-item81Text82TextPage-footer6262--Page-header7268--Picture72727272Section-header68676968Table82838282Text85848484Title77Sec.-h.Sec.-h.Sec.-h.Overall72737877Learning CurveOne of the fundamental questions related to any dataset is if it is 'large enough'. To answer this question for DocLayNet, we performed a data ablation study in which we evaluated a Mask R-CNN model trained on increasing fractions of the DocLayNet dataset. As can be seen in Figure 5, the mAP score rises sharply in the beginning and eventually levels out. To estimate the error-bar on the metrics, we ran the training five times on the entire data-set. This resulted in a 1% error-bar, depicted by the shaded area in Figure 5. In the inset of Figure 5, we show the exact same data-points, but with a logarithmic scale on the x-axis. As is expected, the mAP score increases linearly as a function of the data-size in the inset. The curve ultimately flattens out between the 80% and 100% mark, with the 80% mark falling within the error-bars of the 100% mark. This provides a good indication that the model would not improve significantly by yet increasing the data size. Rather, it would probably benefit more from improved data consistency (as discussed in Section 3), data augmentation methods [23], or the addition of more document categories and styles.Impact of Class LabelsThe choice and number of labels can have a significant effect on the overall model performance. Since PubLayNet, DocBank and DocLayNet all have different label sets, it is of particular interest to understand and quantify this influence of the label set on the model performance. We investigate this by either down-mapping labels into more common ones (e.g. Caption → Text ) or excluding them from the annotations entirely. Furthermore, it must be stressed that all mappings and exclusions were performed on the data before model training. In Table 3, we present the mAP scores for a Mask R-CNN R50 network on different label sets. Where a label is down-mapped, we show its corresponding label, otherwise it was excluded. We present three different label sets, with 6, 5 and 4 different labels respectively. The set of 5 labels contains the same labels as PubLayNet. 
However, due to the different definition ofTable 4: Performance of a Mask R-CNN R50 network with document-wise and page-wise split for different label sets. Naive page-wise split will result in /tildelow 10% point improvement.Class-count115SplitDocPageDocPageCaption6883Footnote7184Formula6066List-item81888288Page-footer6289Page-header7290Picture72827282Section-header68836983Table82898290Text85918490Title7781All72847887lists in PubLayNet (grouped list-items) versus DocLayNet (separate list-items), the label set of size 4 is the closest to PubLayNet, in the assumption that the List is down-mapped to Text in PubLayNet. The results in Table 3 show that the prediction accuracy on the remaining class labels does not change significantly when other classes are merged into them. The overall macro-average improves by around 5%, in particular when Page-footer and Page-header are excluded.Impact of Document Split in Train and Test SetMany documents in DocLayNet have a unique styling. In order to avoid overfitting on a particular style, we have split the train-, test- and validation-sets of DocLayNet on document boundaries, i.e. every document contributes pages to only one set. To the best of our knowledge, this was not considered in PubLayNet or DocBank. To quantify how this affects model performance, we trained and evaluated a Mask R-CNN R50 model on a modified dataset version. Here, the train-, test- and validation-sets were obtained by a randomised draw over the individual pages. As can be seen in Table 4, the difference in model performance is surprisingly large: pagewise splitting gains ˜ 0% in mAP over the document-wise splitting. 1 Thus, random page-wise splitting of DocLayNet can easily lead to accidental overestimation of model performance and should be avoided.Dataset ComparisonThroughout this paper, we claim that DocLayNet's wider variety of document layouts leads to more robust layout detection models. In Table 5, we provide evidence for that. We trained models on each of the available datasets (PubLayNet, DocBank and DocLayNet) and evaluated them on the test sets of the other datasets. Due to the different label sets and annotation styles, a direct comparison is not possible. Hence, we focussed on the common labels among the datasets. Between PubLayNet and DocLayNet, these are Picture ,KDD '22, August 14-18, 2022, Washington, DC, USA Birgit Pfitzmann, Christoph Auer, Michele Dolfi, Ahmed S. Nassar, and Peter StaarTable 5: Prediction Performance (mAP@0.5-0.95) of a Mask R-CNN R50 network across the PubLayNet, DocBank & DocLayNet data-sets. By evaluating on common label classes of each dataset, we observe that the DocLayNet-trained model has much less pronounced variations in performance across all datasets.Testing onTraining onlabelsPLNDBDLNPubLayNet (PLN)Figure964323Sec-header87-32Table952449Text96-42total933430DocBank (DB)Figure777131Table196522total486827DocLayNet (DLN)Figure675172Sec-header53-68Table874382Text77-84total594778Section-header , Table and Text . Before training, we either mapped or excluded DocLayNet's other labels as specified in table 3, and also PubLayNet's List to Text . Note that the different clustering of lists (by list-element vs. whole list objects) naturally decreases the mAP score for Text .For comparison of DocBank with DocLayNet, we trained only on Picture and Table clusters of each dataset. We had to exclude Text because successive paragraphs are often grouped together into a single object in DocBank. 
This paragraph grouping is incompatible with the individual paragraphs of DocLayNet. As can be seen in Table 5, DocLayNet trained models yield better performance compared to the previous datasets. It is noteworthy that the models trained on PubLayNet and DocBank perform very well on their own test set, but have a much lower performance on the foreign datasets. While this also applies to DocLayNet, the difference is far less pronounced. Thus we conclude that DocLayNet trained models are overall more robust and will produce better results for challenging, unseen layouts.Example PredictionsTo conclude this section, we illustrate the quality of layout predictions one can expect from DocLayNet-trained models by providing a selection of examples without any further post-processing applied. Figure 6 shows selected layout predictions on pages from the test-set of DocLayNet. Results look decent in general across document categories, however one can also observe mistakes such as overlapping clusters of different classes, or entirely missing boxes due to low confidence.6 CONCLUSIONIn this paper, we presented the DocLayNet dataset. It provides the document conversion and layout analysis research community a new and challenging dataset to improve and fine-tune novel ML methods on. In contrast to many other datasets, DocLayNet was created by human annotation in order to obtain reliable layout ground-truth on a wide variety of publication- and typesettingstyles. Including a large proportion of documents outside the scientific publishing domain adds significant value in this respect.From the dataset, we have derived on the one hand reference metrics for human performance on document-layout annotation (through double and triple annotations) and on the other hand evaluated the baseline performance of commonly used object detection methods. We also illustrated the impact of various dataset-related aspects on model performance through data-ablation experiments, both from a size and class-label perspective. Last but not least, we compared the accuracy of models trained on other public datasets and showed that DocLayNet trained models are more robust.To date, there is still a significant gap between human and ML accuracy on the layout interpretation task, and we hope that this work will inspire the research community to close that gap.REFERENCES[1] Max Göbel, Tamir Hassan, Ermelinda Oro, and Giorgio Orsi. Icdar 2013 table competition. In 2013 12th International Conference on Document Analysis and Recognition , pages 1449-1453, 2013.[2] Christian Clausner, Apostolos Antonacopoulos, and Stefan Pletschacher. Icdar2017 competition on recognition of documents with complex layouts rdcl2017. In 2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR) , volume 01, pages 1404-1410, 2017.[3] Hervé Déjean, Jean-Luc Meunier, Liangcai Gao, Yilun Huang, Yu Fang, Florian Kleber, and Eva-Maria Lang. ICDAR 2019 Competition on Table Detection and Recognition (cTDaR), April 2019. http://sac.founderit.com/.[4] Antonio Jimeno Yepes, Peter Zhong, and Douglas Burdick. Competition on scientific literature parsing. In Proceedings of the International Conference on Document Analysis and Recognition , ICDAR, pages 605-617. LNCS 12824, SpringerVerlag, sep 2021.[5] Logan Markewich, Hao Zhang, Yubin Xing, Navid Lambert-Shirzad, Jiang Zhexin, Roy Lee, Zhi Li, and Seok-Bum Ko. Segmentation for document layout analysis: not dead yet. 
International Journal on Document Analysis and Recognition (IJDAR) , pages 1-11, 01 2022.[6] Xu Zhong, Jianbin Tang, and Antonio Jimeno-Yepes. Publaynet: Largest dataset ever for document layout analysis. In Proceedings of the International Conference on Document Analysis and Recognition , ICDAR, pages 1015-1022, sep 2019.[7] Minghao Li, Yiheng Xu, Lei Cui, Shaohan Huang, Furu Wei, Zhoujun Li, and Ming Zhou. Docbank: A benchmark dataset for document layout analysis. In Proceedings of the 28th International Conference on Computational Linguistics , COLING, pages 949-960. International Committee on Computational Linguistics, dec 2020.[8] Riaz Ahmad, Muhammad Tanvir Afzal, and M. Qadir. Information extraction from pdf sources based on rule-based system using integrated formats. In SemWebEval@ESWC , 2016.[9] Ross B. Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In IEEE Conference on Computer Vision and Pattern Recognition , CVPR, pages 580-587. IEEE Computer Society, jun 2014.[10] Ross B. Girshick. Fast R-CNN. In 2015 IEEE International Conference on Computer Vision , ICCV, pages 1440-1448. IEEE Computer Society, dec 2015.[11] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. IEEE Transactions on Pattern Analysis and Machine Intelligence , 39(6):1137-1149, 2017.[12] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross B. Girshick. Mask R-CNN. In IEEE International Conference on Computer Vision , ICCV, pages 2980-2988. IEEE Computer Society, Oct 2017.[13] Glenn Jocher, Alex Stoken, Ayush Chaurasia, Jirka Borovec, NanoCode012, TaoXie, Yonghye Kwon, Kalen Michael, Liu Changyu, Jiacong Fang, Abhiram V, Laughing, tkianai, yxNONG, Piotr Skalski, Adam Hogan, Jebastin Nadar, imyhxy, Lorenzo Mammana, Alex Wang, Cristi Fati, Diego Montes, Jan Hajek, LaurentiuDocLayNet: A Large Human-Annotated Dataset for Document-Layout AnalysisKDD '22, August 14-18, 2022, Washington, DC, USAText Caption List-Item Formula Table Section-Header Picture Page-Header Page-Footer TitleFigure 6: Example layout predictions on selected pages from the DocLayNet test-set. (A, D) exhibit favourable results on coloured backgrounds. (B, C) show accurate list-item and paragraph differentiation despite densely-spaced lines. (E) demonstrates good table and figure distinction. (F) shows predictions on a Chinese patent with multiple overlaps, label confusion and missing boxes.Diaconu, Mai Thanh Minh, Marc, albinxavi, fatih, oleg, and wanghao yang. ultralytics/yolov5: v6.0 - yolov5n nano models, roboflow integration, tensorflow export, opencv dnn support, October 2021.[20] Shoubin Li, Xuyan Ma, Shuaiqun Pan, Jun Hu, Lin Shi, and Qing Wang. Vtlayout: Fusion of visual and text features for document layout analysis, 2021.[14] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. CoRR , abs/2005.12872, 2020.[15] Mingxing Tan, Ruoming Pang, and Quoc V. Le. Efficientdet: Scalable and efficient object detection. CoRR , abs/1911.09070, 2019.[16] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C. Lawrence Zitnick. Microsoft COCO: common objects in context, 2014.[17] Yuxin Wu, Alexander Kirillov, Francisco Massa, Wan-Yen Lo, and Ross Girshick. 
Detectron2, 2019.[18] Nikolaos Livathinos, Cesar Berrospi, Maksym Lysak, Viktor Kuropiatnyk, Ahmed Nassar, Andre Carvalho, Michele Dolfi, Christoph Auer, Kasper Dinkla, and Peter W. J. Staar. Robust pdf document conversion using recurrent neural networks. In Proceedings of the 35th Conference on Artificial Intelligence , AAAI, pages 1513715145, feb 2021.[19] Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, and Ming Zhou. Layoutlm: Pre-training of text and layout for document image understanding. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining , KDD, pages 1192-1200, New York, USA, 2020. Association for Computing Machinery.[21] Peng Zhang, Can Li, Liang Qiao, Zhanzhan Cheng, Shiliang Pu, Yi Niu, and Fei Wu. Vsr: A unified framework for document layout analysis combining vision, semantics and relations, 2021.[22] Peter W J Staar, Michele Dolfi, Christoph Auer, and Costas Bekas. Corpus conversion service: A machine learning platform to ingest documents at scale. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining , KDD, pages 774-782. ACM, 2018.[23] Connor Shorten and Taghi M. Khoshgoftaar. A survey on image data augmentation for deep learning. Journal of Big Data , 6(1):60, 2019. diff --git a/test/data/doc/constructed_doc.dt b/test/data/doc/constructed_doc.dt index bc90fee1..ed3e994f 100644 --- a/test/data/doc/constructed_doc.dt +++ b/test/data/doc/constructed_doc.dt @@ -10,39 +10,40 @@ Affiliation 2 list item 1 list item 2 list item 3 -list item 3.a +list item 3.a list item 3.b list item 3.c -list item 3.c.i - - - +list item 3.c.i + + list item 4 + ProductYears20162017Apple49823695944This is the caption of table 1. This is the caption of figure 1. This is the caption of figure 2. item 1 of list -item 1 of list after empty list +item 1 of list after empty list item 2 of list after empty list item 1 of neighboring list item 2 of neighboring list -item 1 of sub list -Here a code snippet: +item 1 of sub list +Here a code snippet: <_unknown_>

Hello world

(to be displayed inline) -
-Here a formula: +
+Here a formula: E=mc^2 (to be displayed inline) -
+ +
Here a code block: <_unknown_>print("Hello world") Here a formula block: E=mc^2 -Some formatting chops: +Some formatting chops: bold italic underline @@ -50,17 +51,18 @@ Affiliation 2 hyperlink & everything at the same time. + Item 1 in A Item 2 in A Item 3 in A -Item 1 in B +Item 1 in B Item 2 in B -Item 1 in C +Item 1 in C Item 2 in C - - + Item 3 in B - + Item 4 in A + The end. \ No newline at end of file diff --git a/test/data/doc/constructed_doc.dt.gt b/test/data/doc/constructed_doc.dt.gt index bc90fee1..ed3e994f 100644 --- a/test/data/doc/constructed_doc.dt.gt +++ b/test/data/doc/constructed_doc.dt.gt @@ -10,39 +10,40 @@ Affiliation 2 list item 1 list item 2 list item 3 -list item 3.a +list item 3.a list item 3.b list item 3.c -list item 3.c.i - - - +list item 3.c.i + + list item 4 +
ProductYears20162017Apple49823695944This is the caption of table 1. This is the caption of figure 1. This is the caption of figure 2. item 1 of list -item 1 of list after empty list +item 1 of list after empty list item 2 of list after empty list item 1 of neighboring list item 2 of neighboring list -item 1 of sub list -Here a code snippet: +item 1 of sub list +Here a code snippet: <_unknown_>

Hello world

(to be displayed inline) -
-Here a formula: +
+Here a formula: E=mc^2 (to be displayed inline) -
+ +
Here a code block: <_unknown_>print("Hello world") Here a formula block: E=mc^2 -Some formatting chops: +Some formatting chops: bold italic underline @@ -50,17 +51,18 @@ Affiliation 2 hyperlink & everything at the same time. + Item 1 in A Item 2 in A Item 3 in A -Item 1 in B +Item 1 in B Item 2 in B -Item 1 in C +Item 1 in C Item 2 in C - - + Item 3 in B - + Item 4 in A + The end. \ No newline at end of file diff --git a/test/data/doc/constructed_document.yaml.dt b/test/data/doc/constructed_document.yaml.dt index bddeda65..9d513d60 100644 --- a/test/data/doc/constructed_document.yaml.dt +++ b/test/data/doc/constructed_document.yaml.dt @@ -10,39 +10,40 @@ Affiliation 2 list item 1 list item 2 list item 3 -list item 3.a +list item 3.a list item 3.b list item 3.c -list item 3.c.i - - - +list item 3.c.i + + list item 4 +
ProductYears20162017Apple49823695944This is the caption of table 1. This is the caption of figure 1. This is the caption of figure 2. item 1 of list -item 1 of list after empty list +item 1 of list after empty list item 2 of list after empty list item 1 of neighboring list item 2 of neighboring list -item 1 of sub list -Here a code snippet: +item 1 of sub list +Here a code snippet: <_unknown_>

Hello world

(to be displayed inline) -
-Here a formula: +
+Here a formula: E=mc^2 (to be displayed inline) -
+ +
Here a code block: <_unknown_>print("Hello world") Here a formula block: E=mc^2 -Some formatting chops: +Some formatting chops: bold italic underline @@ -50,17 +51,18 @@ Affiliation 2 hyperlink & everything at the same time. + Item 1 in A Item 2 in A Item 3 in A -Item 1 in B +Item 1 in B Item 2 in B -Item 1 in C +Item 1 in C Item 2 in C - - + Item 3 in B - + Item 4 in A + The end. diff --git a/test/data/doc/dummy_doc.yaml b/test/data/doc/dummy_doc.yaml index ddd13ba7..c72d82e1 100644 --- a/test/data/doc/dummy_doc.yaml +++ b/test/data/doc/dummy_doc.yaml @@ -138,7 +138,7 @@ pictures: # All pictures... - kind: classification provenance: model1 predicted_classes: - - class_name: "illustration" + - class_name: "bar_chart" confidence: 0.78 - kind: description text: "..." diff --git a/test/data/doc/dummy_doc.yaml.dt b/test/data/doc/dummy_doc.yaml.dt index 57bf45b3..15e2bec0 100644 --- a/test/data/doc/dummy_doc.yaml.dt +++ b/test/data/doc/dummy_doc.yaml.dt @@ -1,4 +1,4 @@ <loc_42><loc_26><loc_406><loc_46>DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis -CC1=NNC(C2=CN3C=CN=C3C(CC3=CC(F)=CC(F)=C3)=N2)=N1Figure 1: Four examples of complex page layouts across different document categories +CC1=NNC(C2=CN3C=CN=C3C(CC3=CC(F)=CC(F)=C3)=N2)=N1Figure 1: Four examples of complex page layouts across different document categories diff --git a/test/data/doc/dummy_doc.yaml.min.dt b/test/data/doc/dummy_doc.yaml.min.dt new file mode 100644 index 00000000..28cdc002 --- /dev/null +++ b/test/data/doc/dummy_doc.yaml.min.dt @@ -0,0 +1 @@ +<loc_42><loc_26><loc_406><loc_46>DocLayNet: A Large Human-Annotated Dataset for Document-Layout AnalysisCC1=NNC(C2=CN3C=CN=C3C(CC3=CC(F)=CC(F)=C3)=N2)=N1Figure 1: Four examples of complex page layouts across different document categories diff --git a/test/test_docling_doc.py b/test/test_docling_doc.py index 33c0f62f..e785c2f2 100644 --- a/test/test_docling_doc.py +++ b/test/test_docling_doc.py @@ -10,6 +10,10 @@ from PIL import ImageDraw from pydantic import AnyUrl, ValidationError +from docling_core.experimental.serializer.doctags import ( + DocTagsDocSerializer, + DocTagsParams, +) from docling_core.types.doc.base import BoundingBox, CoordOrigin, ImageRefMode, Size from docling_core.types.doc.document import ( # BoundingBox, CURRENT_VERSION, @@ -504,6 +508,15 @@ def _test_export_methods(doc: DoclingDocument, filename: str): dt_pred = doc.export_to_document_tokens() _verify_regression_test(dt_pred, filename=filename, ext="dt") + ser = DocTagsDocSerializer( + doc=doc, + params=DocTagsParams( + mode=DocTagsParams.Mode.MINIFIED, + ), + ) + dt_min_pred = ser.serialize().text + _verify_regression_test(dt_min_pred, filename=filename, ext="min.dt") + # Test Tables export ... for table in doc.tables: table.export_to_markdown()
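
Closing note (editorial, not part of the patch): the new *.min.dt fixtures added above are single-line files, which suggests the MINIFIED mode exercised in test_docling_doc.py emits the whole DocTags document on one line. Below is a minimal usage sketch assembled only from the APIs visible in this diff; the helper name, the default-constructed DocTagsParams, and the tokens module path are assumptions.

from docling_core.experimental.serializer.doctags import (
    DocTagsDocSerializer,
    DocTagsParams,
)
from docling_core.types.doc.document import DoclingDocument
from docling_core.types.doc.tokens import DocumentToken  # module path assumed


def export_doctags(doc: DoclingDocument, minified: bool = False) -> str:
    # Serialize a DoclingDocument to DocTags, optionally in the minified mode
    # used for the new *.min.dt regression fixtures.
    params = (
        DocTagsParams(mode=DocTagsParams.Mode.MINIFIED)
        if minified
        else DocTagsParams()  # assumed to carry sensible defaults
    )
    ser = DocTagsDocSerializer(doc=doc, params=params)
    return ser.serialize().text


# The matching special-token vocabulary (document tags, code-language and
# picture-classification tokens, OTSL table tokens, <loc_0>..<loc_499>) can be
# obtained from the refactored helper with its new 500x500 default grid:
special_tokens = DocumentToken.get_special_tokens(page_dimension=(500, 500))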