Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
  • Loading branch information
jackdewinter committed Jul 5, 2024
1 parent 16c8825 commit b3d0404
Show file tree
Hide file tree
Showing 2 changed files with 109 additions and 11 deletions.
59 changes: 49 additions & 10 deletions pymarkdown/leaf_blocks/thematic_leaf_block_processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
"""

import logging
from typing import List, Optional, Tuple
from typing import List, Optional, Tuple, cast

from pymarkdown.block_quotes.block_quote_data import BlockQuoteData
from pymarkdown.container_blocks.container_grab_bag import ContainerGrabBag
Expand All @@ -14,8 +14,13 @@
from pymarkdown.general.position_marker import PositionMarker
from pymarkdown.general.tab_helper import TabHelper
from pymarkdown.leaf_blocks.leaf_block_helper import LeafBlockHelper
from pymarkdown.tokens.list_start_markdown_token import ListStartMarkdownToken
from pymarkdown.tokens.markdown_token import MarkdownToken
from pymarkdown.tokens.stack_token import ParagraphStackToken, StackToken
from pymarkdown.tokens.stack_token import (
ListStackToken,
ParagraphStackToken,
StackToken,
)
from pymarkdown.tokens.thematic_break_markdown_token import ThematicBreakMarkdownToken

POGGER = ParserLogger(logging.getLogger(__name__))
Expand Down Expand Up @@ -83,6 +88,46 @@ def is_thematic_break(

return thematic_break_character, end_of_break_index

@staticmethod
def __handle_existing_paragraph_special(
    parser_state: ParserState,
    grab_bag: ContainerGrabBag,
    new_tokens: List[MarkdownToken],
) -> None:
    """
    Handle the special case where, after closing a paragraph, the innermost
    open block is a list whose indent extends past the text already removed
    by the enclosing container.

    Appends any tokens produced by closing blocks onto *new_tokens* in place;
    returns nothing.
    """
    if (
        parser_state.token_stack[-1].is_list
        and grab_bag.text_removed_by_container is not None
    ):
        stack_list_token = cast(ListStackToken, parser_state.token_stack[-1])
        # Portion of the list's indent not covered by the removed container text.
        indent_delta = stack_list_token.indent_level - len(
            grab_bag.text_removed_by_container
        )
        if indent_delta > 0:
            # Close open blocks down to (but not including) the outermost
            # stack entry, forcing closure of any nested lists.
            closed_tokens, _ = parser_state.close_open_blocks_fn(
                parser_state,
                was_forced=True,
                include_lists=True,
                until_this_index=len(parser_state.token_stack) - 1,
            )
            new_tokens.extend(closed_tokens)
            assert parser_state.token_stack[-1].is_list
            list_token = cast(
                ListStartMarkdownToken,
                parser_state.token_stack[-1].matching_markdown_token,
            )
            # Record the leftover indent as leading spaces on the list token
            # so later rehydration reproduces the original line prefix.
            list_token.add_leading_spaces(" " * indent_delta)

@staticmethod
def __handle_existing_paragraph(
    parser_state: ParserState,
    grab_bag: ContainerGrabBag,
    new_tokens: List[MarkdownToken],
    block_quote_data: BlockQuoteData,
) -> List[MarkdownToken]:
    """
    Close any open paragraph before a thematic break is emitted, forcing the
    close when the line has left a block quote (current count is zero while
    the stack still records open block quotes).

    Returns the list of tokens produced by closing the paragraph, possibly
    extended by list-closure tokens from the special-case handler.
    """
    force_paragraph_close_if_present = (
        block_quote_data.current_count == 0 and block_quote_data.stack_count > 0
    )
    new_tokens, _ = parser_state.close_open_blocks_fn(
        parser_state,
        only_these_blocks=[ParagraphStackToken],
        was_forced=force_paragraph_close_if_present,
    )
    if new_tokens and grab_bag.text_removed_by_container:
        # A paragraph was actually closed inside a container; check whether
        # an enclosing list's indent needs to be accounted for as well.
        ThematicLeafBlockProcessor.__handle_existing_paragraph_special(
            parser_state, grab_bag, new_tokens
        )
    return new_tokens

@staticmethod
def parse_thematic_break(
parser_state: ParserState,
Expand All @@ -109,14 +154,8 @@ def parse_thematic_break(
"parse_thematic_break>>start",
)
if parser_state.token_stack[-1].is_paragraph:
force_paragraph_close_if_present = (
block_quote_data.current_count == 0
and block_quote_data.stack_count > 0
)
new_tokens, _ = parser_state.close_open_blocks_fn(
parser_state,
only_these_blocks=[ParagraphStackToken],
was_forced=force_paragraph_close_if_present,
new_tokens = ThematicLeafBlockProcessor.__handle_existing_paragraph(
parser_state, grab_bag, new_tokens, block_quote_data
)

token_text = position_marker.text_to_parse[
Expand Down
61 changes: 60 additions & 1 deletion test/test_markdown_extra.py
Original file line number Diff line number Diff line change
Expand Up @@ -3275,7 +3275,7 @@ def test_extra_025cxz():
</blockquote>"""

# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens, show_debug=False)
act_and_assert(source_markdown, expected_gfm, expected_tokens)


@pytest.mark.gfm
Expand Down Expand Up @@ -6047,6 +6047,65 @@ def test_extra_043a():
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)

@pytest.mark.gfm
def test_extra_044c():
    """
    Verify a block quote containing a nested unordered list, a fenced code
    block bounded by two thematic breaks, and a trailing new list item.
    """

    # Arrange
    source_markdown = """> + list 1
>   + list 2
>     list 3
>   ------
>   ```block
>   A code block
>   ```
>   ------
> + another list
"""
    expected_tokens = [
        "[block-quote(1,1)::> \n> \n> \n> \n> \n> \n> \n> \n> ]",
        "[ulist(1,3):+::4:: \n \n \n \n \n]",
        "[para(1,5):]",
        "[text(1,5):list 1:]",
        "[end-para:::True]",
        "[ulist(2,5):+::6: : \n ]",
        "[para(2,7):\n]",
        "[text(2,7):list 2\nlist 3::\n]",
        "[end-para:::False]",
        "[end-ulist:::True]",
        "[tbreak(4,5):-::------]",
        "[fcode-block(5,5):`:3:block:::::]",
        "[text(6,3):A code block:]",
        "[end-fcode-block:::3:False]",
        "[tbreak(8,5):-::------]",
        "[li(9,3):4::]",
        "[para(9,5):]",
        "[text(9,5):another list:]",
        "[end-para:::True]",
        "[BLANK(10,1):]",
        "[end-ulist:::True]",
        "[end-block-quote:::True]",
    ]
    expected_gfm = """<blockquote>
<ul>
<li>list 1
<ul>
<li>list 2
list 3</li>
</ul>
<hr />
<pre><code class="language-block">A code block
</code></pre>
<hr />
</li>
<li>another list</li>
</ul>
</blockquote>"""

    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)

@pytest.mark.gfm
def test_extra_999():
Expand Down

0 comments on commit b3d0404

Please sign in to comment.