Skip to content

Commit

Permalink
Added inline cases of each inline token starting, ending, as the only…
Browse files Browse the repository at this point in the history
…, and in the middle of a paragraph, atx heading, and setext heading block.
  • Loading branch information
jackdewinter committed Sep 16, 2020
1 parent 573cdf3 commit 57d7add
Show file tree
Hide file tree
Showing 6 changed files with 3,743 additions and 343 deletions.
17 changes: 15 additions & 2 deletions readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,20 @@

## Bugs - General - Solve

- estimated_column_number
- verify that any special characters used can be recognized and specially escaped
- 52e - make new case with different indent levels for each
- why? shouldn't each one be of the proper length?
```
if split_extracted_whitespace and last_token.rehydrate_index < len(
split_extracted_whitespace
):
```
- verify that the 2-blank-lines solution
`if previous_inline_token.token_name != MarkdownToken.token_blank_line`
does not affect single line and 3+ line solutions
- why needed?

## Bugs - General - Uncategorized

- why does hard break not have \n?
Expand All @@ -39,7 +53,6 @@
- hard break at start of text?
- 634a in bq and in list
- links, 518b
- each inline token surrounded by text
- 518b inside of list and/or block quote
- code span and other multiline inline
- code span with and without multiline, surrounded by text, like 662,091
Expand All @@ -54,7 +67,7 @@
- all multiline elements within a paragraph (and other containers that allow newline)
- make sure line/column is tracking text indenting on each line
- combinations together
- 518 series, but for SetExt and other containers
- 518 series, but for SetExt and Atx containers

## Bugs - Character Entities

Expand Down
331 changes: 0 additions & 331 deletions test/test_markdown_atx_headings.py
Original file line number Diff line number Diff line change
Expand Up @@ -582,334 +582,3 @@ def test_atx_headings_049():
assert_if_lists_different(expected_tokens, actual_tokens)
assert_if_strings_different(expected_gfm, actual_gfm)
assert_token_consistency(source_markdown, actual_tokens)


@pytest.mark.gfm
def test_atx_headings_extra_1():
    """
    Test case extra 1: an ATX heading containing a code span.
    """

    # Arrange
    source_markdown = """## this is a ``fun`` day"""
    expected_tokens = [
        "[atx(1,1):2:0:]",
        "[text(1,4):this is a : ]",
        "[icode-span(1,14):fun:``::]",
        "[text(1,21): day:]",
        "[end-atx:::False]",
    ]
    expected_gfm = """<h2>this is a <code>fun</code> day</h2>"""

    # Act: tokenize the markdown, then render the tokens back out as HTML.
    actual_tokens = TokenizedMarkdown().transform(source_markdown)
    actual_gfm = TransformToGfm().transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


@pytest.mark.gfm
def test_atx_headings_extra_2():
    """
    Test case extra 2: an ATX heading whose text starts with a backslash.
    """

    # Arrange
    source_markdown = """## \\\\this is a fun day\\\\"""
    expected_tokens = [
        "[atx(1,1):2:0:]",
        "[text(1,4):\\\b\\this is a fun day\\\b\\: ]",
        "[end-atx:::False]",
    ]
    expected_gfm = """<h2>\\this is a fun day\\</h2>"""

    # Act: tokenize the markdown, then render the tokens back out as HTML.
    actual_tokens = TokenizedMarkdown().transform(source_markdown)
    actual_gfm = TransformToGfm().transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


@pytest.mark.gfm
def test_atx_headings_extra_3():
    """
    Test case extra 3: an ATX heading that starts with a backslash, as in
    a hard line break.
    """

    # Arrange
    source_markdown = """## \\"""
    expected_tokens = ["[atx(1,1):2:0:]", "[text(1,4):\\: ]", "[end-atx:::False]"]
    expected_gfm = """<h2>\\</h2>"""

    # Act: tokenize the markdown, then render the tokens back out as HTML.
    actual_tokens = TokenizedMarkdown().transform(source_markdown)
    actual_gfm = TransformToGfm().transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


@pytest.mark.gfm
def test_atx_headings_extra_4():
    """
    Test case extra 4: an ATX heading that starts with 2+ spaces, as in
    a hard line break.
    """

    # Arrange
    source_markdown = """## """
    expected_tokens = ["[atx(1,1):2:0:]", "[text(1,7):: ]", "[end-atx:::False]"]
    expected_gfm = """<h2></h2>"""

    # Act: tokenize the markdown, then render the tokens back out as HTML.
    actual_tokens = TokenizedMarkdown().transform(source_markdown)
    actual_gfm = TransformToGfm().transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


@pytest.mark.gfm
def test_atx_headings_extra_5():
    """
    Test case extra 5: an ATX heading whose string starts with a code span.
    """

    # Arrange
    source_markdown = """## ``this is a fun day``"""
    expected_tokens = [
        "[atx(1,1):2:0:]",
        "[text(1,4)::\a \a\x03\a]",
        "[icode-span(1,4):this is a fun day:``::]",
        "[end-atx:::False]",
    ]
    expected_gfm = """<h2><code>this is a fun day</code></h2>"""

    # Act: tokenize the markdown, then render the tokens back out as HTML.
    actual_tokens = TokenizedMarkdown().transform(source_markdown)
    actual_gfm = TransformToGfm().transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


@pytest.mark.gfm
def test_atx_headings_extra_6():
    """
    Test case extra 6: an ATX heading whose string starts with a character
    reference.
    """

    # Arrange
    source_markdown = """## &amp; the band played on"""
    expected_tokens = [
        "[atx(1,1):2:0:]",
        "[text(1,4):\a&amp;\a\a&\a&amp;\a\a the band played on: ]",
        "[end-atx:::False]",
    ]
    expected_gfm = """<h2>&amp; the band played on</h2>"""

    # Act: tokenize the markdown, then render the tokens back out as HTML.
    actual_tokens = TokenizedMarkdown().transform(source_markdown)
    actual_gfm = TransformToGfm().transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


@pytest.mark.gfm
def test_atx_headings_extra_7():
    """
    Test case extra 7: an ATX heading whose string starts with a raw html
    block.
    """

    # Arrange
    source_markdown = """## <there it='is'>"""
    expected_tokens = [
        "[atx(1,1):2:0:]",
        "[text(1,4)::\a \a\x03\a]",
        "[raw-html(1,4):there it='is']",
        "[end-atx:::False]",
    ]
    expected_gfm = """<h2><there it='is'></h2>"""

    # Act: tokenize the markdown, then render the tokens back out as HTML.
    actual_tokens = TokenizedMarkdown().transform(source_markdown)
    actual_gfm = TransformToGfm().transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


@pytest.mark.gfm
def test_atx_headings_extra_8():
    """
    Test case extra 8: an ATX heading whose string starts with an URI
    autolink.
    """

    # Arrange
    source_markdown = """## <http://www.google.com>"""
    expected_tokens = [
        "[atx(1,1):2:0:]",
        "[text(1,4)::\a \a\x03\a]",
        "[uri-autolink(1,4):http://www.google.com]",
        "[end-atx:::False]",
    ]
    expected_gfm = """<h2><a href="http://www.google.com">http://www.google.com</a></h2>"""

    # Act: tokenize the markdown, then render the tokens back out as HTML.
    actual_tokens = TokenizedMarkdown().transform(source_markdown)
    actual_gfm = TransformToGfm().transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


@pytest.mark.gfm
def test_atx_headings_extra_9():
    """
    Test case extra 9: an ATX heading whose string starts with an email
    autolink.
    """

    # Arrange
    source_markdown = """## <foo@bar.com>"""
    expected_tokens = [
        "[atx(1,1):2:0:]",
        "[text(1,4)::\a \a\x03\a]",
        "[email-autolink(1,4):foo@bar.com]",
        "[end-atx:::False]",
    ]
    expected_gfm = """<h2><a href="mailto:foo@bar.com">foo@bar.com</a></h2>"""

    # Act: tokenize the markdown, then render the tokens back out as HTML.
    actual_tokens = TokenizedMarkdown().transform(source_markdown)
    actual_gfm = TransformToGfm().transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


@pytest.mark.gfm
def test_atx_headings_extra_10():
    """
    Test case extra 10: an ATX heading whose string starts with an emphasis.
    """

    # Arrange
    source_markdown = """## *it's me!*"""
    expected_tokens = [
        "[atx(1,1):2:0:]",
        "[text(1,4)::\a \a\x03\a]",
        "[emphasis(1,4):1:*]",
        "[text(1,5):it's me!:]",
        "[end-emphasis(1,13)::1:*:False]",
        "[end-atx:::False]",
    ]
    expected_gfm = """<h2><em>it's me!</em></h2>"""

    # Act: tokenize the markdown, then render the tokens back out as HTML.
    actual_tokens = TokenizedMarkdown().transform(source_markdown)
    actual_gfm = TransformToGfm().transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


@pytest.mark.gfm
def test_atx_headings_extra_11():
    """
    Test case extra 11: an ATX heading whose string starts with a link.
    Also see test case 183.
    """

    # Arrange
    source_markdown = """## [Foo](/uri)"""
    expected_tokens = [
        "[atx(1,1):2:0:]",
        "[text(1,4)::\a \a\x03\a]",
        "[link(1,4):inline:/uri:::::Foo:False::::]",
        "[text(1,5):Foo:]",
        "[end-link:::False]",
        "[end-atx:::False]",
    ]
    expected_gfm = """<h2><a href="/uri">Foo</a></h2>"""

    # Act: tokenize the markdown, then render the tokens back out as HTML.
    actual_tokens = TokenizedMarkdown().transform(source_markdown)
    actual_gfm = TransformToGfm().transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)


@pytest.mark.gfm
def test_atx_headings_extra_12():
    """
    Test case extra 12: an ATX heading whose string starts with an image.
    """

    # Arrange
    source_markdown = """## ![foo](/url "title")"""
    expected_tokens = [
        "[atx(1,1):2:0:]",
        "[text(1,4)::\a \a\x03\a]",
        '[image(1,4):inline:/url:title:foo::::foo:False:":: :]',
        "[end-atx:::False]",
    ]
    expected_gfm = """<h2><img src="/url" alt="foo" title="title" /></h2>"""

    # Act: tokenize the markdown, then render the tokens back out as HTML.
    actual_tokens = TokenizedMarkdown().transform(source_markdown)
    actual_gfm = TransformToGfm().transform(actual_tokens)

    # Assert
    assert_if_lists_different(expected_tokens, actual_tokens)
    assert_if_strings_different(expected_gfm, actual_gfm)
    assert_token_consistency(source_markdown, actual_tokens)
Loading

0 comments on commit 57d7add

Please sign in to comment.