From 1d84c69bf898daf7aac97ca4805ea1bb360bd009 Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Thu, 26 Mar 2020 20:32:11 +0000 Subject: [PATCH 01/32] Move to markdown-it parser --- _archive/_docutils_renderer.py | 1243 ++++++ myst_parser/__init__.py | 41 +- myst_parser/block_tokens.py | 225 - myst_parser/docutils_renderer.py | 1343 ++---- myst_parser/html_renderer.py | 139 - myst_parser/json_renderer.py | 43 - myst_parser/main.py | 63 + myst_parser/mocking.py | 398 ++ myst_parser/span_tokens.py | 66 - myst_parser/sphinx_parser.py | 18 +- myst_parser/sphinx_renderer.py | 132 + tests/test_commonmark/commonmark.json | 3896 +++++++++-------- tests/test_commonmark/spec.sh | 2 +- tests/test_commonmark/test_commonmark.py | 32 +- tests/test_renderers/conftest.py | 26 - tests/test_renderers/fixtures/basic.md | 556 +++ .../fixtures/docutil_directives.md | 430 ++ .../test_renderers/fixtures/docutil_roles.md | 141 + tests/test_renderers/fixtures/role_options.md | 154 + .../fixtures/sphinx_directives.md | 483 ++ tests/test_renderers/fixtures/sphinx_roles.md | 672 +++ tests/test_renderers/sphinx_directives.json | 595 --- tests/test_renderers/sphinx_roles.json | 525 --- tests/test_renderers/test_docutils.py | 476 -- .../test_docutils/test_cross_referencing.xml | 18 - .../test_docutils/test_full_run.xml | 69 - .../test_link_def_in_directive_nested.xml | 10 - tests/test_renderers/test_fixtures.py | 89 + tests/test_renderers/test_html.py | 128 - .../test_html/test_minimal_html_page.html | 44 - tests/test_renderers/test_roles_directives.py | 283 -- ...ctive_options_error_block_style_text0_.xml | 15 - ...ctive_options_error_colon_style_text1_.xml | 13 - tests/test_syntax/test_ast.py | 140 - .../test_block_break_basic_strings0_.yml | 14 - .../test_block_break_escaped_strings3_.yml | 34 - ...k_following_content_no_space_strings8_.yml | 14 - ...lock_break_following_content_strings5_.yml | 14 - ..._block_break_following_space_strings6_.yml | 14 - ...est_block_break_follows_list_strings7_.yml | 48 - .../test_block_break_indent_2_strings1_.yml | 14 - .../test_block_break_indent_4_strings2_.yml | 23 - .../test_block_break_inline_strings4_.yml | 20 - .../test_ast/test_comment_basic_strings0_.yml | 14 - .../test_comment_escaped_strings3_.yml | 34 - .../test_comment_follows_list_strings5_.yml | 48 - .../test_comment_indent_2_strings1_.yml | 14 - .../test_comment_indent_4_strings2_.yml | 23 - .../test_comment_inline_strings4_.yml | 20 - .../test_front_matter_basic_strings0_.yml | 15 - ...t_link_references_ref_escape_strings3_.yml | 48 - ...st_link_references_ref_first_strings0_.yml | 32 - ...est_link_references_ref_last_strings1_.yml | 32 - ...t_link_references_ref_syntax_strings2_.yml | 39 - .../test_ast/test_role_basic_strings0_.yml | 28 - .../test_ast/test_role_escaped_strings3_.yml | 48 - .../test_role_external_code_strings11_.yml | 27 - .../test_role_external_emphasis_strings7_.yml | 35 - .../test_role_external_math_strings9_.yml | 20 - .../test_ast/test_role_indent_2_strings1_.yml | 28 - .../test_ast/test_role_indent_4_strings2_.yml | 23 - .../test_ast/test_role_inline_strings4_.yml | 35 - .../test_role_internal_code_strings10_.yml | 28 - .../test_role_internal_emphasis_strings6_.yml | 28 - .../test_role_internal_math_strings8_.yml | 28 - .../test_ast/test_role_multiple_strings5_.yml | 50 - tests/test_syntax/test_ast/test_table.yml | 137 - .../test_ast/test_target_basic_strings0_.yml | 28 - .../test_target_escaped_strings3_.yml | 34 - ...est_target_external_emphasis_strings6_.yml | 35 - .../test_target_indent_2_strings1_.yml | 28 - .../test_target_indent_4_strings2_.yml | 23 - .../test_ast/test_target_inline_strings4_.yml | 35 - ...est_target_internal_emphasis_strings5_.yml | 28 - tests/test_syntax/test_ast/test_walk.yml | 27 - 75 files changed, 6819 insertions(+), 6956 deletions(-) create mode 100644 _archive/_docutils_renderer.py delete mode 100644 myst_parser/block_tokens.py delete mode 100644 myst_parser/html_renderer.py delete mode 100644 myst_parser/json_renderer.py create mode 100644 myst_parser/main.py create mode 100644 myst_parser/mocking.py delete mode 100644 myst_parser/span_tokens.py create mode 100644 myst_parser/sphinx_renderer.py delete mode 100644 tests/test_renderers/conftest.py create mode 100644 tests/test_renderers/fixtures/basic.md create mode 100644 tests/test_renderers/fixtures/docutil_directives.md create mode 100644 tests/test_renderers/fixtures/docutil_roles.md create mode 100644 tests/test_renderers/fixtures/role_options.md create mode 100644 tests/test_renderers/fixtures/sphinx_directives.md create mode 100644 tests/test_renderers/fixtures/sphinx_roles.md delete mode 100644 tests/test_renderers/sphinx_directives.json delete mode 100644 tests/test_renderers/sphinx_roles.json delete mode 100644 tests/test_renderers/test_docutils.py delete mode 100644 tests/test_renderers/test_docutils/test_cross_referencing.xml delete mode 100644 tests/test_renderers/test_docutils/test_full_run.xml delete mode 100644 tests/test_renderers/test_docutils/test_link_def_in_directive_nested.xml create mode 100644 tests/test_renderers/test_fixtures.py delete mode 100644 tests/test_renderers/test_html.py delete mode 100644 tests/test_renderers/test_html/test_minimal_html_page.html delete mode 100644 tests/test_renderers/test_roles_directives.py delete mode 100644 tests/test_renderers/test_roles_directives/test_directive_options_error_block_style_text0_.xml delete mode 100644 tests/test_renderers/test_roles_directives/test_directive_options_error_colon_style_text1_.xml delete mode 100644 tests/test_syntax/test_ast.py delete mode 100644 tests/test_syntax/test_ast/test_block_break_basic_strings0_.yml delete mode 100644 tests/test_syntax/test_ast/test_block_break_escaped_strings3_.yml delete mode 100644 tests/test_syntax/test_ast/test_block_break_following_content_no_space_strings8_.yml delete mode 100644 tests/test_syntax/test_ast/test_block_break_following_content_strings5_.yml delete mode 100644 tests/test_syntax/test_ast/test_block_break_following_space_strings6_.yml delete mode 100644 tests/test_syntax/test_ast/test_block_break_follows_list_strings7_.yml delete mode 100644 tests/test_syntax/test_ast/test_block_break_indent_2_strings1_.yml delete mode 100644 tests/test_syntax/test_ast/test_block_break_indent_4_strings2_.yml delete mode 100644 tests/test_syntax/test_ast/test_block_break_inline_strings4_.yml delete mode 100644 tests/test_syntax/test_ast/test_comment_basic_strings0_.yml delete mode 100644 tests/test_syntax/test_ast/test_comment_escaped_strings3_.yml delete mode 100644 tests/test_syntax/test_ast/test_comment_follows_list_strings5_.yml delete mode 100644 tests/test_syntax/test_ast/test_comment_indent_2_strings1_.yml delete mode 100644 tests/test_syntax/test_ast/test_comment_indent_4_strings2_.yml delete mode 100644 tests/test_syntax/test_ast/test_comment_inline_strings4_.yml delete mode 100644 tests/test_syntax/test_ast/test_front_matter_basic_strings0_.yml delete mode 100644 tests/test_syntax/test_ast/test_link_references_ref_escape_strings3_.yml delete mode 100644 tests/test_syntax/test_ast/test_link_references_ref_first_strings0_.yml delete mode 100644 tests/test_syntax/test_ast/test_link_references_ref_last_strings1_.yml delete mode 100644 tests/test_syntax/test_ast/test_link_references_ref_syntax_strings2_.yml delete mode 100644 tests/test_syntax/test_ast/test_role_basic_strings0_.yml delete mode 100644 tests/test_syntax/test_ast/test_role_escaped_strings3_.yml delete mode 100644 tests/test_syntax/test_ast/test_role_external_code_strings11_.yml delete mode 100644 tests/test_syntax/test_ast/test_role_external_emphasis_strings7_.yml delete mode 100644 tests/test_syntax/test_ast/test_role_external_math_strings9_.yml delete mode 100644 tests/test_syntax/test_ast/test_role_indent_2_strings1_.yml delete mode 100644 tests/test_syntax/test_ast/test_role_indent_4_strings2_.yml delete mode 100644 tests/test_syntax/test_ast/test_role_inline_strings4_.yml delete mode 100644 tests/test_syntax/test_ast/test_role_internal_code_strings10_.yml delete mode 100644 tests/test_syntax/test_ast/test_role_internal_emphasis_strings6_.yml delete mode 100644 tests/test_syntax/test_ast/test_role_internal_math_strings8_.yml delete mode 100644 tests/test_syntax/test_ast/test_role_multiple_strings5_.yml delete mode 100644 tests/test_syntax/test_ast/test_table.yml delete mode 100644 tests/test_syntax/test_ast/test_target_basic_strings0_.yml delete mode 100644 tests/test_syntax/test_ast/test_target_escaped_strings3_.yml delete mode 100644 tests/test_syntax/test_ast/test_target_external_emphasis_strings6_.yml delete mode 100644 tests/test_syntax/test_ast/test_target_indent_2_strings1_.yml delete mode 100644 tests/test_syntax/test_ast/test_target_indent_4_strings2_.yml delete mode 100644 tests/test_syntax/test_ast/test_target_inline_strings4_.yml delete mode 100644 tests/test_syntax/test_ast/test_target_internal_emphasis_strings5_.yml delete mode 100644 tests/test_syntax/test_ast/test_walk.yml diff --git a/_archive/_docutils_renderer.py b/_archive/_docutils_renderer.py new file mode 100644 index 00000000..1fe3f45f --- /dev/null +++ b/_archive/_docutils_renderer.py @@ -0,0 +1,1243 @@ +from collections import OrderedDict +from contextlib import contextmanager +import copy +from os.path import splitext +from pathlib import Path +import re +import sys +from typing import List, Optional +from urllib.parse import urlparse, unquote + +from docutils import nodes +from docutils.frontend import OptionParser +from docutils.languages import get_language +from docutils.parsers.rst import directives, Directive, DirectiveError, roles +from docutils.parsers.rst import Parser as RSTParser +from docutils.parsers.rst.directives.misc import Include +from docutils.parsers.rst.states import RSTStateMachine, Body, Inliner +from docutils.statemachine import StringList +from docutils.utils import new_document, Reporter +import yaml + +from mistletoe import block_tokens, block_tokens_ext, span_tokens, span_tokens_ext +from mistletoe.base_elements import SourceLines +from mistletoe.renderers.base import BaseRenderer +from mistletoe.parse_context import get_parse_context, ParseContext + +from myst_parser import block_tokens as myst_block_tokens +from myst_parser import span_tokens as myst_span_tokens +from myst_parser.parse_directives import parse_directive_text, DirectiveParsingError +from myst_parser.utils import escape_url + + +class DocutilsRenderer(BaseRenderer): + """A mistletoe renderer to populate (in-place) a `docutils.document` AST. + + Note this renderer has no dependencies on Sphinx. + """ + + default_block_tokens = ( + block_tokens.HTMLBlock, + myst_block_tokens.LineComment, + block_tokens.BlockCode, + block_tokens.Heading, + myst_block_tokens.Quote, + block_tokens.CodeFence, + block_tokens.ThematicBreak, + myst_block_tokens.BlockBreak, + myst_block_tokens.List, + block_tokens_ext.Table, + block_tokens_ext.Footnote, + block_tokens.LinkDefinition, + myst_block_tokens.Paragraph, + ) + + default_span_tokens = ( + span_tokens.EscapeSequence, + myst_span_tokens.Role, + span_tokens.HTMLSpan, + span_tokens.AutoLink, + myst_span_tokens.Target, + span_tokens.CoreTokens, + span_tokens_ext.FootReference, + span_tokens_ext.Math, + # TODO there is no matching core element in docutils for strikethrough + # span_tokens_ext.Strikethrough, + span_tokens.InlineCode, + span_tokens.LineBreak, + span_tokens.RawText, + ) + + def __init__( + self, + document: Optional[nodes.document] = None, + current_node: Optional[nodes.Element] = None, + config: Optional[dict] = None, + parse_context: Optional[ParseContext] = None, + ): + """Initialise the renderer. + + :param document: The document to populate (or create a new one if None) + :param current_node: The root node from which to begin populating + (default is document, or should be an ancestor of document) + :param config: contains configuration specific to the rendering process + :param parse_context: the parse context stores global parsing variables, + such as the block/span tokens to search for, + and link/footnote definitions that have been collected. + If None, a new context will be instatiated, with the default + block/span tokens for this renderer. + These will be re-instatiated on ``__enter__``. + :type parse_context: mistletoe.parse_context.ParseContext + """ + self.config = config or {} + self.document = document or self.new_document() # type: nodes.document + self.reporter = self.document.reporter # type: Reporter + self.current_node = current_node or self.document # type: nodes.Element + self.language_module = self.document.settings.language_code # type: str + get_language(self.language_module) + self._level_to_elem = {0: self.document} + + super().__init__(parse_context=parse_context) + + def new_document(self, source_path="notset") -> nodes.document: + """Create a new docutils document.""" + settings = OptionParser(components=(RSTParser,)).get_default_values() + return new_document(source_path, settings=settings) + + def add_line_and_source_path(self, node, token): + """Copy the line number and document source path to the docutils node.""" + try: + node.line = token.position.line_start + 1 + except (AttributeError, TypeError): + pass + node.source = self.document["source"] + + def nested_render_text(self, text: str, lineno: int, token): + """Render unparsed text.""" + lines = SourceLines( + text, + start_line=lineno, + uri=self.document["source"], + metadata=token.position.data, + standardize_ends=True, + ) + doc_token = myst_block_tokens.Document.read( + lines, front_matter=True, reset_definitions=False + ) + # TODO think if this is the best way: here we consume front matter, + # but then remove it. this is for example if includes have front matter + doc_token.front_matter = None + # we mark the token as nested so that footnotes etc aren't rendered + doc_token.is_nested = True + self.render(doc_token) + + def render_children(self, token): + for child in token.children: + self.render(child) + + @contextmanager + def current_node_context(self, node, append: bool = False): + """Context manager for temporarily setting the current node.""" + if append: + self.current_node.append(node) + current_node = self.current_node + self.current_node = node + yield + self.current_node = current_node + + def render_document(self, token: block_tokens.Document): + if token.front_matter: + self.render_front_matter(token.front_matter) + self.render_children(token) + + if getattr(token, "is_nested", False): + # if the document is nested in another, we don't want to output footnotes + return self.document + + # we use the footnotes stored in the global context, + # rather than those stored on the document, + # since additional references may have been made in nested parses + footnotes = get_parse_context().foot_definitions + + # we don't use the foot_references stored on the global context, + # since references within directives/roles will have been added after + # those from the initial markdown parse + # instead we gather them from a walk of the created document + # foot_refs = get_parse_context().foot_references + foot_refs = OrderedDict() + for refnode in self.document.traverse(nodes.footnote_reference): + if refnode["refname"] not in foot_refs: + foot_refs[refnode["refname"]] = True + + if foot_refs: + self.current_node.append(nodes.transition()) + for footref in foot_refs: + if footref in footnotes: + self.render_footnote(footnotes[footref]) + + return self.document + + def render_front_matter(self, token): + """Pass document front matter data + + For RST, all field lists are captured by + ``docutils.docutils.parsers.rst.states.Body.field_marker``, + then, if one occurs at the document, it is transformed by + `docutils.docutils.transforms.frontmatter.DocInfo`, and finally + this is intercepted by sphinx and added to the env in + `sphinx.environment.collectors.metadata.MetadataCollector.process_doc` + + So technically the values should be parsed to AST, but this is redundant, + since `process_doc` just converts them back to text. + + """ + try: + data = token.get_data() + except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error: + msg_node = self.reporter.error( + "Front matter block:\n" + str(error), line=token.position.line_start + ) + msg_node += nodes.literal_block(token.content, token.content) + self.current_node += [msg_node] + return + + docinfo = dict_to_docinfo(data) + self.current_node.append(docinfo) + + def render_footnote(self, token: block_tokens_ext.Footnote): + footnote = nodes.footnote() + self.add_line_and_source_path(footnote, token) + # footnote += nodes.label('', token.target) + footnote["names"].append(token.target) + footnote["auto"] = 1 + self.document.note_autofootnote(footnote) + self.document.note_explicit_target(footnote, footnote) + # TODO for now we wrap the content (which are list of spans tokens) + # in a paragraph, but eventually upstream in mistletoe this will already be + # block level tokens + self.current_node.append(footnote) + paragraph = nodes.paragraph("") + self.add_line_and_source_path(paragraph, token) + footnote.append(paragraph) + with self.current_node_context(paragraph, append=False): + self.render_children(token) + + def render_foot_reference(self, token): + """Footnote references are added as auto-numbered, + .i.e. `[^a]` is read as rST `[#a]_` + """ + refnode = nodes.footnote_reference("[^{}]".format(token.target)) + self.add_line_and_source_path(refnode, token) + refnode["auto"] = 1 + refnode["refname"] = token.target + # refnode += nodes.Text(token.target) + self.document.note_autofootnote_ref(refnode) + self.document.note_footnote_ref(refnode) + self.current_node.append(refnode) + + def render_paragraph(self, token): + if len(token.children) == 1 and isinstance( + token.children[0], myst_span_tokens.Target + ): + # promote the target to block level + return self.render_target(token.children[0]) + para = nodes.paragraph("") + self.add_line_and_source_path(para, token) + with self.current_node_context(para, append=True): + self.render_children(token) + + def render_line_comment(self, token): + self.current_node.append(nodes.comment(token.content, token.content)) + + def render_target(self, token): + text = token.children[0].content + name = nodes.fully_normalize_name(text) + target = nodes.target(text) + target["names"].append(name) + self.add_line_and_source_path(target, token) + self.document.note_explicit_target(target, self.current_node) + self.current_node.append(target) + + def render_raw_text(self, token): + text = token.content + self.current_node.append(nodes.Text(text, text)) + + def render_escape_sequence(self, token): + text = token.children[0].content + self.current_node.append(nodes.Text(text, text)) + + def render_line_break(self, token): + if token.soft: + self.current_node.append(nodes.Text("\n")) + else: + self.current_node.append(nodes.raw("", "
\n", format="html")) + + def render_strong(self, token): + node = nodes.strong() + self.add_line_and_source_path(node, token) + with self.current_node_context(node, append=True): + self.render_children(token) + + def render_emphasis(self, token): + node = nodes.emphasis() + self.add_line_and_source_path(node, token) + with self.current_node_context(node, append=True): + self.render_children(token) + + def render_quote(self, token): + quote = nodes.block_quote() + self.add_line_and_source_path(quote, token) + with self.current_node_context(quote, append=True): + self.render_children(token) + + def render_strikethrough(self, token): + # TODO there is no existing node/role for this + raise NotImplementedError + + def render_thematic_break(self, token): + node = nodes.transition() + self.add_line_and_source_path(node, token) + self.current_node.append(node) + + def render_block_break(self, token): + block_break = nodes.comment(token.content, token.content) + block_break["classes"] += ["block_break"] + self.add_line_and_source_path(block_break, token) + self.current_node.append(block_break) + + def render_math(self, token): + if token.content.startswith("$$"): + content = token.content[2:-2] + node = nodes.math_block(content, content, nowrap=False, number=None) + else: + content = token.content[1:-1] + node = nodes.math(content, content) + self.add_line_and_source_path(node, token) + self.current_node.append(node) + + def render_block_code(self, token): + # this should never have a language, since it is just indented text, however, + # creating a literal_block with no language will raise a warning in sphinx + text = token.children[0].content + language = token.language or "none" + node = nodes.literal_block(text, text, language=language) + self.add_line_and_source_path(node, token) + self.current_node.append(node) + + def render_code_fence(self, token): + if token.language.startswith("{") and token.language.endswith("}"): + return self.render_directive(token) + + text = token.children[0].content + language = token.language + if not language: + try: + sphinx_env = self.document.settings.env + language = sphinx_env.temp_data.get( + "highlight_language", sphinx_env.config.highlight_language + ) + except AttributeError: + pass + if not language: + language = self.config.get("highlight_language", "") + node = nodes.literal_block(text, text, language=language) + self.add_line_and_source_path(node, token) + self.current_node.append(node) + + def render_inline_code(self, token): + text = token.children[0].content + node = nodes.literal(text, text) + self.add_line_and_source_path(node, token) + self.current_node.append(node) + + def _is_section_level(self, level, section): + return self._level_to_elem.get(level, None) == section + + def _add_section(self, section, level): + parent_level = max( + section_level + for section_level in self._level_to_elem + if level > section_level + ) + parent = self._level_to_elem[parent_level] + parent.append(section) + self._level_to_elem[level] = section + + # Prune level to limit + self._level_to_elem = dict( + (section_level, section) + for section_level, section in self._level_to_elem.items() + if section_level <= level + ) + + def render_heading(self, token): + # Test if we're replacing a section level first + if isinstance(self.current_node, nodes.section): + if self._is_section_level(token.level, self.current_node): + self.current_node = self.current_node.parent + + title_node = nodes.title() + self.add_line_and_source_path(title_node, token) + + new_section = nodes.section() + self.add_line_and_source_path(new_section, token) + new_section.append(title_node) + + self._add_section(new_section, token.level) + + self.current_node = title_node + self.render_children(token) + + assert isinstance(self.current_node, nodes.title) + text = self.current_node.astext() + # if self.translate_section_name: + # text = self.translate_section_name(text) + name = nodes.fully_normalize_name(text) + section = self.current_node.parent + section["names"].append(name) + self.document.note_implicit_target(section, section) + self.current_node = section + + def handle_cross_reference(self, token, destination): + # TODO use the docutils error reporting mechanisms, rather than raising + if not self.config.get("ignore_missing_refs", False): + raise NotImplementedError( + "reference not found in current document: {}\n{}".format( + destination, token + ) + ) + + def render_link(self, token): + ref_node = nodes.reference() + self.add_line_and_source_path(ref_node, token) + # Check destination is supported for cross-linking and remove extension + # TODO escape urls? + destination = token.target + _, ext = splitext(destination) + # TODO check for other supported extensions, such as those specified in + # the Sphinx conf.py file but how to access this information? + # TODO this should probably only remove the extension for local paths, + # i.e. not uri's starting with http or other external prefix. + + # if ext.replace('.', '') in self.supported: + # destination = destination.replace(ext, '') + ref_node["refuri"] = destination + if token.title: + ref_node["title"] = token.title + next_node = ref_node + + url_check = urlparse(destination) + # If there's not a url scheme (e.g. 'https' for 'https:...' links), + # or there is a scheme but it's not in the list of known_url_schemes, + # then assume it's a cross-reference + known_url_schemes = self.config.get("known_url_schemes", None) + if known_url_schemes: + scheme_known = url_check.scheme in known_url_schemes + else: + scheme_known = bool(url_check.scheme) + + if not url_check.fragment and not scheme_known: + self.handle_cross_reference(token, destination) + else: + self.current_node.append(next_node) + with self.current_node_context(ref_node): + self.render_children(token) + + def render_image(self, token): + img_node = nodes.image() + self.add_line_and_source_path(img_node, token) + img_node["uri"] = token.src + + img_node["alt"] = "" + if token.children and isinstance(token.children[0], span_tokens.RawText): + img_node["alt"] = token.children[0].content + token.children[0].content = "" + + self.current_node.append(img_node) + # TODO how should non-raw alternative text be handled? + # with self.set_current_node(img_node): + # self.render_children(token) + + def render_list(self, token): + list_node = None + if token.start_at is not None: + list_node = nodes.enumerated_list() + # TODO deal with token.start_at? + # TODO support numerals/letters for lists + # (see https://stackoverflow.com/a/48372856/5033292) + # See docutils/docutils/parsers/rst/states.py:Body.enumerator + # list_node['enumtype'] = 'arabic', 'loweralpha', 'upperroman', etc. + # list_node['start'] + # list_node['prefix'] + # list_node['suffix'] + else: + list_node = nodes.bullet_list() + # TODO deal with token.loose? + self.add_line_and_source_path(list_node, token) + + self.current_node.append(list_node) + with self.current_node_context(list_node): + self.render_children(token) + + def render_list_item(self, token: myst_block_tokens.ListItem): + item_node = nodes.list_item() + self.add_line_and_source_path(item_node, token) + self.current_node.append(item_node) + with self.current_node_context(item_node): + self.render_children(token) + + def render_table(self, token): + table = nodes.table() + table["classes"] += ["colwidths-auto"] + # TODO column alignment + maxcols = max(len(c.children) for c in token.children) + # TODO are colwidths actually required + colwidths = [100 / maxcols] * maxcols + tgroup = nodes.tgroup(cols=len(colwidths)) + table += tgroup + for colwidth in colwidths: + colspec = nodes.colspec(colwidth=colwidth) + tgroup += colspec + + if hasattr(token, "header"): + thead = nodes.thead() + tgroup += thead + with self.current_node_context(thead): + self.render_table_row(token.header) + + tbody = nodes.tbody() + tgroup += tbody + + with self.current_node_context(tbody): + self.render_children(token) + + self.current_node.append(table) + + def render_table_row(self, token): + row = nodes.row() + with self.current_node_context(row, append=True): + self.render_children(token) + + def render_table_cell(self, token): + entry = nodes.entry() + with self.current_node_context(entry, append=True): + self.render_children(token) + + def render_auto_link(self, token): + if token.mailto: + refuri = "mailto:{}".format(token.target) + else: + refuri = escape_url(token.target) + ref_node = nodes.reference(token.target, token.target, refuri=refuri) + self.add_line_and_source_path(ref_node, token) + self.current_node.append(ref_node) + + def render_html_span(self, token): + self.current_node.append(nodes.raw("", token.content, format="html")) + + def render_html_block(self, token): + self.current_node.append(nodes.raw("", token.content, format="html")) + + def render_role(self, token): + content = token.children[0].content + name = token.role_name + # TODO role name white/black lists + try: + lineno = token.position.line_start + except (AttributeError, TypeError): + lineno = 0 + inliner = MockInliner(self, lineno) + role_func, messages = roles.role( + name, self.language_module, lineno, self.reporter + ) + rawsource = ":{}:`{}`".format(name, content) + # # backslash escapes converted to nulls (``\x00``) + text = span_tokens.EscapeSequence.strip(content) + if role_func: + nodes, messages2 = role_func(name, rawsource, text, lineno, inliner) + # return nodes, messages + messages2 + self.current_node += nodes + else: + message = self.reporter.error( + 'Unknown interpreted text role "{}".'.format(name), line=lineno + ) + # return ([self.problematic(content, content, msg)], messages + [msg]) + problematic = inliner.problematic(text, rawsource, message) + self.current_node += problematic + + def render_directive(self, token): + """Render special fenced code blocks as directives.""" + name = token.language[1:-1] + # TODO directive name white/black lists + content = token.children[0].content + self.document.current_line = token.position.line_start + + # get directive class + directive_class, messages = directives.directive( + name, self.language_module, self.document + ) # type: (Directive, list) + if not directive_class: + error = self.reporter.error( + "Unknown directive type '{}'\n".format(name), + # nodes.literal_block(content, content), + line=token.position.line_start, + ) + self.current_node += [error] + messages + return + + try: + arguments, options, body_lines = parse_directive_text( + directive_class, token.arguments, content + ) + except DirectiveParsingError as error: + error = self.reporter.error( + "Directive '{}':\n{}".format(name, error), + nodes.literal_block(content, content), + line=token.position.line_start, + ) + self.current_node += [error] + return + + # initialise directive + if issubclass(directive_class, Include): + directive_instance = MockIncludeDirective( + self, + name=name, + klass=directive_class, + arguments=arguments, + options=options, + body=body_lines, + token=token, + ) + else: + state_machine = MockStateMachine(self, token.position.line_start) + state = MockState( + self, state_machine, token.position.line_start, token=token + ) + directive_instance = directive_class( + name=name, + # the list of positional arguments + arguments=arguments, + # a dictionary mapping option names to values + options=options, + # the directive content line by line + content=StringList(body_lines, self.document["source"]), + # the absolute line number of the first line of the directive + lineno=token.position.line_start, + # the line offset of the first line of the content + content_offset=0, # TODO get content offset from `parse_directive_text` + # a string containing the entire directive + block_text="\n".join(body_lines), + state=state, + state_machine=state_machine, + ) + + # run directive + try: + result = directive_instance.run() + except DirectiveError as error: + msg_node = self.reporter.system_message( + error.level, error.msg, line=token.position.line_start + ) + msg_node += nodes.literal_block(content, content) + result = [msg_node] + except MockingError as exc: + error = self.reporter.error( + "Directive '{}' cannot be mocked:\n{}: {}".format( + name, exc.__class__.__name__, exc + ), + nodes.literal_block(content, content), + line=token.position.line_start, + ) + self.current_node += [error] + return + assert isinstance( + result, list + ), 'Directive "{}" must return a list of nodes.'.format(name) + for i in range(len(result)): + assert isinstance( + result[i], nodes.Node + ), 'Directive "{}" returned non-Node object (index {}): {}'.format( + name, i, result[i] + ) + self.current_node += result + + +class SphinxRenderer(DocutilsRenderer): + """A mistletoe renderer to populate (in-place) a `docutils.document` AST. + + This is sub-class of `DocutilsRenderer` that handles sphinx cross-referencing. + """ + + def __init__(self, *args, **kwargs): + """Intitalise SphinxRenderer + + :param load_sphinx_env: load a basic sphinx environment, + when using the renderer as a context manager outside if `sphinx-build` + :param sphinx_conf: a dictionary representation of the sphinx `conf.py` + :param sphinx_srcdir: a path to a source directory + (for example, can be used for `include` statements) + + To use this renderer in a 'standalone' fashion:: + + from myst_parser.block_tokens import Document + + with SphinxRenderer(load_sphinx_env=True, sphinx_conf={}) as renderer: + renderer.render(Document.read("source text")) + + """ + self.load_sphinx_env = kwargs.pop("load_sphinx_env", False) + self.sphinx_conf = kwargs.pop("sphinx_conf", None) + self.sphinx_srcdir = kwargs.pop("sphinx_srcdir", None) + super().__init__(*args, **kwargs) + + def handle_cross_reference(self, token, destination): + from sphinx import addnodes + + wrap_node = addnodes.pending_xref( + reftarget=unquote(destination), + reftype="any", + refdomain=None, # Added to enable cross-linking + refexplicit=len(token.children) > 0, + refwarn=True, + ) + self.add_line_and_source_path(wrap_node, token) + if token.title: + wrap_node["title"] = token.title + self.current_node.append(wrap_node) + text_node = nodes.literal("", "", classes=["xref", "any"]) + wrap_node.append(text_node) + with self.current_node_context(text_node): + self.render_children(token) + + def mock_sphinx_env(self, configuration=None, sourcedir=None): + """Create a minimimal Sphinx environment; + loading sphinx roles, directives, etc. + """ + from sphinx.application import builtin_extensions, Sphinx + from sphinx.config import Config + from sphinx.environment import BuildEnvironment + from sphinx.events import EventManager + from sphinx.project import Project + from sphinx.registry import SphinxComponentRegistry + from sphinx.util.tags import Tags + + class MockSphinx(Sphinx): + """Minimal sphinx init to load roles and directives.""" + + def __init__(self, confoverrides=None, srcdir=None): + self.extensions = {} + self.registry = SphinxComponentRegistry() + self.html_themes = {} + self.events = EventManager(self) + self.tags = Tags(None) + self.config = Config({}, confoverrides or {}) + self.config.pre_init_values() + self._init_i18n() + for extension in builtin_extensions: + self.registry.load_extension(self, extension) + # fresh env + self.doctreedir = None + self.srcdir = srcdir + self.confdir = None + self.outdir = None + self.project = Project(srcdir=srcdir, source_suffix=".md") + self.project.docnames = ["mock_docname"] + self.env = BuildEnvironment() + self.env.setup(self) + self.env.temp_data["docname"] = "mock_docname" + self.builder = None + + if not confoverrides: + return + + # this code is only required for more complex parsing with extensions + for extension in self.config.extensions: + self.setup_extension(extension) + buildername = "dummy" + self.preload_builder(buildername) + self.config.init_values() + self.events.emit("config-inited", self.config) + import tempfile + + with tempfile.TemporaryDirectory() as tempdir: + # creating a builder attempts to make the doctreedir + self.doctreedir = tempdir + self.builder = self.create_builder(buildername) + self.doctreedir = None + + app = MockSphinx(confoverrides=configuration, srcdir=sourcedir) + self.document.settings.env = app.env + return app + + def __enter__(self): + """If `load_sphinx_env=True`, we set up an environment, + to parse sphinx roles/directives, outside of a `sphinx-build`. + + This primarily copies the code in `sphinx.util.docutils.docutils_namespace` + and `sphinx.util.docutils.sphinx_domains`. + """ + if not self.load_sphinx_env: + return super().__enter__() + + # store currently loaded roles/directives, so we can revert on exit + self._directives = copy.copy(directives._directives) + self._roles = copy.copy(roles._roles) + # Monkey-patch directive and role dispatch, + # so that sphinx domain-specific markup takes precedence. + self._env = self.mock_sphinx_env( + configuration=self.sphinx_conf, sourcedir=self.sphinx_srcdir + ).env + from sphinx.util.docutils import sphinx_domains + + self._sphinx_domains = sphinx_domains(self._env) + self._sphinx_domains.enable() + + return super().__enter__() + + def __exit__(self, exception_type, exception_val, traceback): + if not self.load_sphinx_env: + return super().__exit__(exception_type, exception_val, traceback) + # revert loaded roles/directives + directives._directives = self._directives + roles._roles = self._roles + self._directives = None + self._roles = None + # unregister nodes (see `sphinx.util.docutils.docutils_namespace`) + from sphinx.util.docutils import additional_nodes, unregister_node + + for node in list(additional_nodes): + unregister_node(node) + additional_nodes.discard(node) + # revert directive/role function (see `sphinx.util.docutils.sphinx_domains`) + self._sphinx_domains.disable() + self._sphinx_domains = None + self._env = None + return super().__exit__(exception_type, exception_val, traceback) + + +class MockingError(Exception): + """An exception to signal an error during mocking of docutils components.""" + + +class MockInliner: + """A mock version of `docutils.parsers.rst.states.Inliner`. + + This is parsed to role functions. + """ + + def __init__(self, renderer: DocutilsRenderer, lineno: int): + self._renderer = renderer + self.document = renderer.document + self.reporter = renderer.document.reporter + if not hasattr(self.reporter, "get_source_and_line"): + # TODO this is called by some roles, + # but I can't see how that would work in RST? + self.reporter.get_source_and_line = lambda l: (self.document["source"], l) + self.parent = renderer.current_node + self.language = renderer.language_module + self.rfc_url = "rfc%d.html" + + def problematic(self, text: str, rawsource: str, message: nodes.system_message): + msgid = self.document.set_id(message, self.parent) + problematic = nodes.problematic(rawsource, rawsource, refid=msgid) + prbid = self.document.set_id(problematic) + message.add_backref(prbid) + return problematic + + # TODO add parse method + + def __getattr__(self, name): + """This method is only be called if the attribute requested has not + been defined. Defined attributes will not be overridden. + """ + # TODO use document.reporter mechanism? + if hasattr(Inliner, name): + msg = "{cls} has not yet implemented attribute '{name}'".format( + cls=type(self).__name__, name=name + ) + raise MockingError(msg).with_traceback(sys.exc_info()[2]) + msg = "{cls} has no attribute {name}".format(cls=type(self).__name__, name=name) + raise MockingError(msg).with_traceback(sys.exc_info()[2]) + + +class MockState: + """A mock version of `docutils.parsers.rst.states.RSTState`. + + This is parsed to the `Directives.run()` method, + so that they may run nested parses on their content that will be parsed as markdown, + rather than RST. + """ + + def __init__( + self, + renderer: DocutilsRenderer, + state_machine: "MockStateMachine", + lineno: int, + token, + ): + self._renderer = renderer + self._lineno = lineno + self._token = token + self.document = renderer.document + self.state_machine = state_machine + + class Struct: + document = self.document + reporter = self.document.reporter + language = self.document.settings.language_code + title_styles = [] + section_level = max(renderer._level_to_elem) + section_bubble_up_kludge = False + inliner = MockInliner(renderer, lineno) + + self.memo = Struct + + def nested_parse( + self, + block: StringList, + input_offset: int, + node: nodes.Element, + match_titles: bool = False, + state_machine_class=None, + state_machine_kwargs=None, + ): + current_match_titles = self.state_machine.match_titles + self.state_machine.match_titles = match_titles + with self._renderer.current_node_context(node): + self._renderer.nested_render_text( + block, self._lineno + input_offset, token=self._token + ) + self.state_machine.match_titles = current_match_titles + + def inline_text(self, text: str, lineno: int): + # TODO return messages? + messages = [] + paragraph = nodes.paragraph("") + # here we instatiate a new renderer, + # so that the nested parse does not effect the current renderer, + # but we use the same global parse context, so that link references, etc + # are added to the global parse. + renderer = self._renderer.__class__( + document=self.document, + current_node=paragraph, + parse_context=get_parse_context(), + ) + lines = SourceLines( + text, + start_line=self._lineno, + uri=self.document["source"], + metadata=self._token.position.data, + standardize_ends=True, + ) + doc_token = myst_block_tokens.Document.read( + lines, front_matter=False, reset_definitions=False + ) + # we mark the token as nested so that footnotes etc aren't rendered + doc_token.is_nested = True + renderer.render(doc_token) + textnodes = [] + if paragraph.children: + # first child should be paragraph + textnodes = paragraph.children[0].children + return textnodes, messages + + # U+2014 is an em-dash: + attribution_pattern = re.compile("^((?:---?(?!-)|\u2014) *)(.+)") + + def block_quote(self, lines: List[str], line_offset: int): + """Parse a block quote, which is a block of text, + followed by an (optional) attribution. + + :: + + No matter where you go, there you are. + + -- Buckaroo Banzai + """ + elements = [] + # split attribution + last_line_blank = False + blockquote_lines = lines + attribution_lines = [] + attribution_line_offset = None + # First line after a blank line must begin with a dash + for i, line in enumerate(lines): + if not line.strip(): + last_line_blank = True + continue + if not last_line_blank: + last_line_blank = False + continue + last_line_blank = False + match = self.attribution_pattern.match(line) + if not match: + continue + attribution_line_offset = i + attribution_lines = [match.group(2)] + for at_line in lines[i + 1 :]: + indented_line = at_line[len(match.group(1)) :] + if len(indented_line) != len(at_line.lstrip()): + break + attribution_lines.append(indented_line) + blockquote_lines = lines[:i] + break + # parse block + blockquote = nodes.block_quote() + self.nested_parse(blockquote_lines, line_offset, blockquote) + elements.append(blockquote) + # parse attribution + if attribution_lines: + attribution_text = "\n".join(attribution_lines) + lineno = self._lineno + line_offset + attribution_line_offset + textnodes, messages = self.inline_text(attribution_text, lineno) + attribution = nodes.attribution(attribution_text, "", *textnodes) + ( + attribution.source, + attribution.line, + ) = self.state_machine.get_source_and_line(lineno) + blockquote += attribution + elements += messages + return elements + + def build_table(self, tabledata, tableline, stub_columns=0, widths=None): + return Body.build_table(self, tabledata, tableline, stub_columns, widths) + + def build_table_row(self, rowdata, tableline): + return Body.build_table_row(self, rowdata, tableline) + + def __getattr__(self, name): + """This method is only be called if the attribute requested has not + been defined. Defined attributes will not be overridden. + """ + if hasattr(Body, name): + msg = "{cls} has not yet implemented attribute '{name}'".format( + cls=type(self).__name__, name=name + ) + raise MockingError(msg).with_traceback(sys.exc_info()[2]) + msg = "{cls} has no attribute {name}".format(cls=type(self).__name__, name=name) + raise MockingError(msg).with_traceback(sys.exc_info()[2]) + + +class MockStateMachine: + """A mock version of `docutils.parsers.rst.states.RSTStateMachine`. + + This is parsed to the `Directives.run()` method. + """ + + def __init__(self, renderer: DocutilsRenderer, lineno: int): + self._renderer = renderer + self._lineno = lineno + self.document = renderer.document + self.reporter = self.document.reporter + self.node = renderer.current_node + self.match_titles = True + + # TODO to allow to access like attributes like input_lines, + # we would need to store the input lines, + # probably via the `Document` token, + # and maybe self._lines = lines[:], then for AstRenderer, + # ignore private attributes + + def get_source(self, lineno: Optional[int] = None): + """Return document source path.""" + return self.document["source"] + + def get_source_and_line(self, lineno: Optional[int] = None): + """Return (source path, line) tuple for current or given line number.""" + return self.document["source"], lineno or self._lineno + + def __getattr__(self, name): + """This method is only be called if the attribute requested has not + been defined. Defined attributes will not be overridden. + """ + if hasattr(RSTStateMachine, name): + msg = "{cls} has not yet implemented attribute '{name}'".format( + cls=type(self).__name__, name=name + ) + raise MockingError(msg).with_traceback(sys.exc_info()[2]) + msg = "{cls} has no attribute {name}".format(cls=type(self).__name__, name=name) + raise MockingError(msg).with_traceback(sys.exc_info()[2]) + + +class MockIncludeDirective: + """This directive uses a lot of statemachine logic that is not yet mocked. + Therefore, we treat it as a special case (at least for now). + + See: + https://docutils.sourceforge.io/docs/ref/rst/directives.html#including-an-external-document-fragment + """ + + def __init__( + self, + renderer: DocutilsRenderer, + name: str, + klass: Include, + arguments: list, + options: dict, + body: List[str], + token, + ): + self.renderer = renderer + self.document = renderer.document + self.name = name + self.klass = klass + self.arguments = arguments + self.options = options + self.body = body + self.lineno = token.position.line_start + self.token = token + + def run(self): + + from docutils.parsers.rst.directives.body import CodeBlock, NumberLines + + if not self.document.settings.file_insertion_enabled: + raise DirectiveError(2, 'Directive "{}" disabled.'.format(self.name)) + + source_dir = Path(self.document["source"]).absolute().parent + include_arg = "".join([s.strip() for s in self.arguments[0].splitlines()]) + + if include_arg.startswith("<") and include_arg.endswith(">"): + # # docutils "standard" includes + path = Path(self.klass.standard_include_path).joinpath(include_arg[1:-1]) + else: + # if using sphinx interpret absolute paths "correctly", + # i.e. relative to source directory + try: + sphinx_env = self.document.settings.env + _, include_arg = sphinx_env.relfn2path(self.arguments[0]) + sphinx_env.note_included(include_arg) + except AttributeError: + pass + path = Path(include_arg) + path = source_dir.joinpath(path) + + # read file + encoding = self.options.get("encoding", self.document.settings.input_encoding) + error_handler = self.document.settings.input_encoding_error_handler + # tab_width = self.options.get("tab-width", self.document.settings.tab_width) + try: + file_content = path.read_text(encoding=encoding, errors=error_handler) + except Exception as error: + raise DirectiveError( + 4, + 'Directive "{}": error reading file: {}\n{error}.'.format( + self.name, path, error + ), + ) + + # get required section of text + startline = self.options.get("start-line", None) + endline = self.options.get("end-line", None) + file_content = "\n".join(file_content.splitlines()[startline:endline]) + startline = startline or 0 + for split_on_type in ["start-after", "end-before"]: + split_on = self.options.get(split_on_type, None) + if not split_on: + continue + split_index = file_content.find(split_on) + if split_index < 0: + raise DirectiveError( + 4, + 'Directive "{}"; option "{}": text not found "{}".'.format( + self.name, split_on_type, split_on + ), + ) + if split_on_type == "start-after": + startline += split_index + len(split_on) + file_content = file_content[split_index + len(split_on) :] + else: + file_content = file_content[:split_index] + + if "literal" in self.options: + literal_block = nodes.literal_block( + file_content, source=str(path), classes=self.options.get("class", []) + ) + literal_block.line = 1 # TODO don;t think this should be 1? + self.add_name(literal_block) + if "number-lines" in self.options: + try: + startline = int(self.options["number-lines"] or 1) + except ValueError: + raise DirectiveError( + 3, ":number-lines: with non-integer " "start value" + ) + endline = startline + len(file_content.splitlines()) + if file_content.endswith("\n"): + file_content = file_content[:-1] + tokens = NumberLines([([], file_content)], startline, endline) + for classes, value in tokens: + if classes: + literal_block += nodes.inline(value, value, classes=classes) + else: + literal_block += nodes.Text(value) + else: + literal_block += nodes.Text(file_content) + return [literal_block] + if "code" in self.options: + self.options["source"] = str(path) + state_machine = MockStateMachine(self.renderer, self.lineno) + state = MockState(self.renderer, state_machine, self.lineno, self.token) + codeblock = CodeBlock( + name=self.name, + arguments=[self.options.pop("code")], + options=self.options, + content=file_content.splitlines(), + lineno=self.lineno, + content_offset=0, + block_text=file_content, + state=state, + state_machine=state_machine, + ) + return codeblock.run() + + # Here we perform a nested render, but temporarily setup the document/reporter + # with the correct document path and lineno for the included file. + source = self.renderer.document["source"] + rsource = self.renderer.reporter.source + line_func = getattr(self.renderer.reporter, "get_source_and_line", None) + try: + self.renderer.document["source"] = str(path) + self.renderer.reporter.source = str(path) + self.renderer.reporter.get_source_and_line = lambda l: (str(path), l) + self.renderer.nested_render_text(file_content, startline, token=self.token) + finally: + self.renderer.document["source"] = source + self.renderer.reporter.source = rsource + if line_func is not None: + self.renderer.reporter.get_source_and_line = line_func + else: + del self.renderer.reporter.get_source_and_line + return [] + + def add_name(self, node): + """Append self.options['name'] to node['names'] if it exists. + + Also normalize the name string and register it as explicit target. + """ + if "name" in self.options: + name = nodes.fully_normalize_name(self.options.pop("name")) + if "name" in node: + del node["name"] + node["names"].append(name) + self.renderer.document.note_explicit_target(node, node) + + +def dict_to_docinfo(data): + """Render a key/val pair as a docutils field node.""" + # TODO this data could be used to support default option values for directives + docinfo = nodes.docinfo() + + # Throw away all non-stringy values + # TODO: support more complex data structures as values + for key, value in data.items(): + if not isinstance(value, (str, int, float)): + continue + value = str(value) + field_node = nodes.field() + field_node.source = value + field_node += nodes.field_name(key, "", nodes.Text(key, key)) + field_node += nodes.field_body(value, nodes.Text(value, value)) + docinfo += field_node + return docinfo diff --git a/myst_parser/__init__.py b/myst_parser/__init__.py index df8e673e..b065df69 100644 --- a/myst_parser/__init__.py +++ b/myst_parser/__init__.py @@ -1,43 +1,4 @@ -__version__ = "0.7.1" - - -def text_to_tokens(text: str): - """Convert some text to the MyST base AST.""" - from myst_parser.block_tokens import Document - from myst_parser.json_renderer import JsonRenderer - - # this loads the MyST specific token parsers - with JsonRenderer(): - return Document.read(text) - - -def render_tokens(root_token, renderer, **kwargs): - """Convert a token to another format.""" - with renderer(**kwargs) as renderer: - return renderer.render(root_token) - - -def parse_text(text: str, output_type: str, **kwargs): - """Convert MyST text to another format. - - :param text: the text to convert - :param output_type: one of 'dict', 'html', 'docutils', 'sphinx' - :param kwargs: parsed to the render initiatiation - """ - if output_type == "dict": - from myst_parser.ast_renderer import AstRenderer as renderer_cls - elif output_type == "html": - from myst_parser.html_renderer import HTMLRenderer as renderer_cls - elif output_type == "docutils": - from myst_parser.docutils_renderer import DocutilsRenderer as renderer_cls - elif output_type == "sphinx": - from myst_parser.docutils_renderer import SphinxRenderer as renderer_cls - else: - raise ValueError("output_type not recognised: {}".format(output_type)) - from myst_parser.block_tokens import Document - - with renderer_cls(**kwargs) as renderer: - return renderer.render(Document.read(text)) +__version__ = "0.8.0" def setup(app): diff --git a/myst_parser/block_tokens.py b/myst_parser/block_tokens.py deleted file mode 100644 index 3c6ce68a..00000000 --- a/myst_parser/block_tokens.py +++ /dev/null @@ -1,225 +0,0 @@ -import re -from typing import Tuple - -import attr - -from mistletoe import block_tokens -from mistletoe.base_elements import Position -from mistletoe.block_tokens import Heading, ThematicBreak, CodeFence -from mistletoe.attr_doc import autodoc - - -@autodoc -@attr.s(slots=False, kw_only=True) -class Document(block_tokens.Document): - """Document token.""" - - # TODO this class should eventually be removed - - @classmethod - def read(cls, *args, **kwargs): - # default to front matter is True - kwargs["front_matter"] = kwargs.get("front_matter", True) - doc = super().read(*args, **kwargs) - - if kwargs.get("propogate_pos", True): - # TODO this is a placeholder for implementing span level range storage - # (with start/end character attributes) - for result in doc.walk(): - if getattr(result.node, "position", None) is None: - try: - result.node.position = result.parent.position - except (AttributeError, TypeError): - raise - return doc - - -@autodoc -@attr.s(slots=True, kw_only=True) -class LineComment(block_tokens.BlockToken): - """Line comment start with % """ - - content: str = attr.ib( - repr=False, metadata={"doc": "literal strings rendered as-is"} - ) - raw: str = attr.ib(repr=False, metadata={"doc": "literal strings rendered as-is"}) - position: Tuple[int, int] = attr.ib( - metadata={"doc": "Line position in source text (start, end)"} - ) - - pattern = re.compile(r"^ {0,3}\%\s*(.*)") - - @classmethod - def start(cls, line): - match_obj = cls.pattern.match(line) - if match_obj is None: - return False - cls.content = (match_obj.group(1) or "").strip() - return True - - @classmethod - def read(cls, lines): - line = next(lines) - return cls( - raw=line.splitlines()[0], - content=cls.content, - position=Position.from_source_lines(lines), - ) - - -@autodoc -@attr.s(slots=True, kw_only=True) -class BlockBreak(block_tokens.BlockToken): - """Block break token ``+++``. - - This syntax is myst specific, used to denote the start of a new block of text. - This constuct's intended use case is for mapping to cell based document formats, - like jupyter notebooks, to indicate a new text cell. - """ - - content: str = attr.ib( - repr=False, metadata={"doc": "literal strings rendered as-is"} - ) - raw: str = attr.ib(repr=False, metadata={"doc": "literal strings rendered as-is"}) - position: Tuple[int, int] = attr.ib( - metadata={"doc": "Line position in source text (start, end)"} - ) - - pattern = re.compile(r"^ {0,3}(?:(\+)\s*?)(?:\1\s*?){2,}(.*)$") - - def __init__(self, result): - content, line, lineno = result - self.content = content - self.raw = line.splitlines()[0] - self.range = (lineno, lineno) - - @classmethod - def start(cls, line): - match_obj = cls.pattern.match(line) - if match_obj is None: - return False - cls.content = (match_obj.group(2) or "").strip() - return True - - @classmethod - def read(cls, lines): - line = next(lines) - return cls( - raw=line.splitlines()[0], - content=cls.content, - position=Position.from_source_lines(lines), - ) - - -@autodoc -@attr.s(slots=True, kw_only=True) -class Quote(block_tokens.Quote): - """Quote token. (`["> # heading\\n", "> paragraph\\n"]`). - - MyST variant, that includes transitions to `LineComment` and `BlockBreak`. - """ - - @classmethod - def transition(cls, next_line): - return ( - next_line is None - or next_line.strip() == "" - or LineComment.start(next_line) - or Heading.start(next_line) - or CodeFence.start(next_line) - or ThematicBreak.start(next_line) - or BlockBreak.start(next_line) - or List.start(next_line) - ) - - -@autodoc -@attr.s(slots=True, kw_only=True) -class Paragraph(block_tokens.Paragraph): - """Paragraph token. (`["some\\n", "continuous\\n", "lines\\n"]`) - - Boundary between span-level and block-level tokens. - - MyST variant, that includes transitions to `LineComment` and `BlockBreak`. - """ - - @classmethod - def transition(cls, next_line): - return ( - next_line is None - or next_line.strip() == "" - or LineComment.start(next_line) - or Heading.start(next_line) - or CodeFence.start(next_line) - or Quote.start(next_line) - or BlockBreak.start(next_line) - ) - - -@autodoc -@attr.s(slots=True, kw_only=True) -class List(block_tokens.List): - """List token (unordered or ordered) - - MyST variant, that includes transitions to `LineComment` and `BlockBreak`. - """ - - @classmethod - def read(cls, lines): - start_line = lines.lineno - leader = None - next_marker = None - children = [] - while True: - item = ListItem.read(lines, next_marker) - next_marker = item.next_marker - item_leader = item.leader - if leader is None: - leader = item_leader - elif not cls.same_marker_type(leader, item_leader): - lines.reset() - break - children.append(item) - if next_marker is None: - break - - if children: - # Only consider the last list item loose if there's more than one element - last_parse_buffer = children[-1] - last_parse_buffer.loose = ( - len(last_parse_buffer.children) > 1 and last_parse_buffer.loose - ) - - loose = any(item.loose for item in children) - leader = children[0].leader - start = None - if len(leader) != 1: - start = int(leader[:-1]) - return cls( - children=children, - loose=loose, - start_at=start, - position=Position.from_source_lines(lines, start_line=start_line), - ) - - -@autodoc -@attr.s(slots=True, kw_only=True) -class ListItem(block_tokens.ListItem): - """List items. - - Not included in the parsing process, but called by List. - - MyST variant, that includes transitions to `LineComment` and `BlockBreak`. - """ - - @staticmethod - def transition(next_line): - return ( - Heading.start(next_line) - or LineComment.start(next_line) - or Quote.start(next_line) - or CodeFence.start(next_line) - or ThematicBreak.start(next_line) - or BlockBreak.start(next_line) - ) diff --git a/myst_parser/docutils_renderer.py b/myst_parser/docutils_renderer.py index 1fe3f45f..62aabf3a 100644 --- a/myst_parser/docutils_renderer.py +++ b/myst_parser/docutils_renderer.py @@ -1,139 +1,167 @@ -from collections import OrderedDict +"""NOTE: this will eventually be moved out of core""" from contextlib import contextmanager -import copy +import inspect +import json from os.path import splitext -from pathlib import Path -import re -import sys -from typing import List, Optional -from urllib.parse import urlparse, unquote +from typing import List +from urllib.parse import urlparse + +import yaml from docutils import nodes from docutils.frontend import OptionParser + from docutils.languages import get_language from docutils.parsers.rst import directives, Directive, DirectiveError, roles from docutils.parsers.rst import Parser as RSTParser from docutils.parsers.rst.directives.misc import Include -from docutils.parsers.rst.states import RSTStateMachine, Body, Inliner from docutils.statemachine import StringList from docutils.utils import new_document, Reporter -import yaml -from mistletoe import block_tokens, block_tokens_ext, span_tokens, span_tokens_ext -from mistletoe.base_elements import SourceLines -from mistletoe.renderers.base import BaseRenderer -from mistletoe.parse_context import get_parse_context, ParseContext - -from myst_parser import block_tokens as myst_block_tokens -from myst_parser import span_tokens as myst_span_tokens -from myst_parser.parse_directives import parse_directive_text, DirectiveParsingError -from myst_parser.utils import escape_url - - -class DocutilsRenderer(BaseRenderer): - """A mistletoe renderer to populate (in-place) a `docutils.document` AST. - - Note this renderer has no dependencies on Sphinx. - """ - - default_block_tokens = ( - block_tokens.HTMLBlock, - myst_block_tokens.LineComment, - block_tokens.BlockCode, - block_tokens.Heading, - myst_block_tokens.Quote, - block_tokens.CodeFence, - block_tokens.ThematicBreak, - myst_block_tokens.BlockBreak, - myst_block_tokens.List, - block_tokens_ext.Table, - block_tokens_ext.Footnote, - block_tokens.LinkDefinition, - myst_block_tokens.Paragraph, - ) - - default_span_tokens = ( - span_tokens.EscapeSequence, - myst_span_tokens.Role, - span_tokens.HTMLSpan, - span_tokens.AutoLink, - myst_span_tokens.Target, - span_tokens.CoreTokens, - span_tokens_ext.FootReference, - span_tokens_ext.Math, - # TODO there is no matching core element in docutils for strikethrough - # span_tokens_ext.Strikethrough, - span_tokens.InlineCode, - span_tokens.LineBreak, - span_tokens.RawText, - ) - - def __init__( - self, - document: Optional[nodes.document] = None, - current_node: Optional[nodes.Element] = None, - config: Optional[dict] = None, - parse_context: Optional[ParseContext] = None, - ): - """Initialise the renderer. - - :param document: The document to populate (or create a new one if None) - :param current_node: The root node from which to begin populating - (default is document, or should be an ancestor of document) - :param config: contains configuration specific to the rendering process - :param parse_context: the parse context stores global parsing variables, - such as the block/span tokens to search for, - and link/footnote definitions that have been collected. - If None, a new context will be instatiated, with the default - block/span tokens for this renderer. - These will be re-instatiated on ``__enter__``. - :type parse_context: mistletoe.parse_context.ParseContext - """ - self.config = config or {} - self.document = document or self.new_document() # type: nodes.document +from markdown_it import MarkdownIt +from markdown_it.token import Token, nest_tokens +from markdown_it.utils import AttrDict +from markdown_it.common.utils import escapeHtml + +from myst_parser.mocking import ( + MockInliner, + MockState, + MockStateMachine, + MockingError, + MockIncludeDirective, +) +from .parse_directives import parse_directive_text, DirectiveParsingError + + +def make_document(source_path="notset") -> nodes.document: + """Create a new docutils document.""" + settings = OptionParser(components=(RSTParser,)).get_default_values() + return new_document(source_path, settings=settings) + + +class DocutilsRenderer: + __output__ = "docutils" + + def __init__(self, parser: MarkdownIt): + self.md = parser + self.rules = { + k: v + for k, v in inspect.getmembers(self, predicate=inspect.ismethod) + if k.startswith("render_") and k != "render_children" + } + + def setup_render(self, options, env): + self.env = env + self.config = options + self.document = self.config.get("document", make_document()) + self.current_node = self.config.get( + "current_node", self.document + ) # type: nodes.Element self.reporter = self.document.reporter # type: Reporter - self.current_node = current_node or self.document # type: nodes.Element self.language_module = self.document.settings.language_code # type: str get_language(self.language_module) self._level_to_elem = {0: self.document} - super().__init__(parse_context=parse_context) + def render(self, tokens: List[Token], options, env: AttrDict): + """Run the render on a token stream. - def new_document(self, source_path="notset") -> nodes.document: - """Create a new docutils document.""" - settings = OptionParser(components=(RSTParser,)).get_default_values() - return new_document(source_path, settings=settings) + :param tokens: list on block tokens to render + :param options: params of parser instance + :param env: the environment sandbox associated with the tokens, + containing additional metadata like reference info + """ + self.setup_render(options, env) + + # propagate line number down to inline elements + for token in tokens: + if token.map: + # For docutils we want 1 based line numbers (not 0) + token.map = [token.map[0] + 1, token.map[1] + 1] + for child in token.children or []: + child.map = token.map + + # nest tokens + tokens = nest_tokens(tokens) + + # move footnote definitions to env + self.env.setdefault("foot_refs", []) + new_tokens = [] + for token in tokens: + if token.type == "footnote_reference_open": + self.env["foot_refs"].append(token) + else: + new_tokens.append(token) + tokens = new_tokens + + # render + for i, token in enumerate(tokens): + # skip hidden? + if f"render_{token.type}" in self.rules: + self.rules[f"render_{token.type}"](token) + else: + # TODO make this reporter warning + print(f"no render method for: {token.type}") - def add_line_and_source_path(self, node, token): - """Copy the line number and document source path to the docutils node.""" - try: - node.line = token.position.line_start + 1 - except (AttributeError, TypeError): - pass - node.source = self.document["source"] + # TODO log warning for duplicate reference definitions + + if not self.config.get("output_footnotes", True): + return self.document + + # add footnotes + referenced = { + v["label"] for v in self.env.get("footnotes", {}).get("list", {}).values() + } + # only output referenced + foot_refs = [f for f in self.env["foot_refs"] if f.meta["label"] in referenced] + + if foot_refs: + self.current_node.append(nodes.transition()) + for footref in foot_refs: # TODO sort by referenced + self.render_footnote_reference_open(footref) - def nested_render_text(self, text: str, lineno: int, token): + return self.document + + def nested_render_text(self, text: str, lineno: int, disable_front_matter=True): """Render unparsed text.""" - lines = SourceLines( - text, - start_line=lineno, - uri=self.document["source"], - metadata=token.position.data, - standardize_ends=True, - ) - doc_token = myst_block_tokens.Document.read( - lines, front_matter=True, reset_definitions=False - ) - # TODO think if this is the best way: here we consume front matter, - # but then remove it. this is for example if includes have front matter - doc_token.front_matter = None - # we mark the token as nested so that footnotes etc aren't rendered - doc_token.is_nested = True - self.render(doc_token) - def render_children(self, token): - for child in token.children: - self.render(child) + if disable_front_matter: + # parse without front matter + with self.md.reset_rules(): + self.md.disable("front_matter", True) + tokens = self.md.parse(text, self.env) + else: + tokens = self.md.parse(text, self.env) + if tokens[0].type == "front_matter": + tokens.pop(0) + + # set correct line numbers + for token in tokens: + if token.map: + token.map = [token.map[0] + lineno, token.map[1] + lineno] + for child in token.children or []: + child.map = token.map + + # nest tokens + tokens = nest_tokens(tokens) + + # move footnote definitions to env + self.env.setdefault("foot_refs", []) + new_tokens = [] + for token in tokens: + if token.type == "footnote_reference_open": + self.env["foot_refs"].append(token) + else: + new_tokens.append(token) + tokens = new_tokens + + # render + for i, token in enumerate(tokens): + # skip hidden? + if f"render_{token.type}" in self.rules: + self.rules[f"render_{token.type}"](token) + else: + # TODO make this reporter warning + print(f"no render method for: {token.type}") @contextmanager def current_node_context(self, node, append: bool = False): @@ -145,191 +173,129 @@ def current_node_context(self, node, append: bool = False): yield self.current_node = current_node - def render_document(self, token: block_tokens.Document): - if token.front_matter: - self.render_front_matter(token.front_matter) - self.render_children(token) - - if getattr(token, "is_nested", False): - # if the document is nested in another, we don't want to output footnotes - return self.document - - # we use the footnotes stored in the global context, - # rather than those stored on the document, - # since additional references may have been made in nested parses - footnotes = get_parse_context().foot_definitions - - # we don't use the foot_references stored on the global context, - # since references within directives/roles will have been added after - # those from the initial markdown parse - # instead we gather them from a walk of the created document - # foot_refs = get_parse_context().foot_references - foot_refs = OrderedDict() - for refnode in self.document.traverse(nodes.footnote_reference): - if refnode["refname"] not in foot_refs: - foot_refs[refnode["refname"]] = True + def render_children(self, token): + for i, child in enumerate(token.children or []): + if f"render_{child.type}" in self.rules: + self.rules[f"render_{child.type}"](child) + else: + print(f"no render method for: {child.type}") - if foot_refs: - self.current_node.append(nodes.transition()) - for footref in foot_refs: - if footref in footnotes: - self.render_footnote(footnotes[footref]) + def add_line_and_source_path(self, node, token): + """Copy the line number and document source path to the docutils node.""" + try: + node.line = token.map[0] + 1 + except (AttributeError, TypeError): + pass + node.source = self.document["source"] - return self.document + def is_section_level(self, level, section): + return self._level_to_elem.get(level, None) == section - def render_front_matter(self, token): - """Pass document front matter data + def add_section(self, section, level): + parent_level = max( + section_level + for section_level in self._level_to_elem + if level > section_level + ) + parent = self._level_to_elem[parent_level] + parent.append(section) + self._level_to_elem[level] = section - For RST, all field lists are captured by - ``docutils.docutils.parsers.rst.states.Body.field_marker``, - then, if one occurs at the document, it is transformed by - `docutils.docutils.transforms.frontmatter.DocInfo`, and finally - this is intercepted by sphinx and added to the env in - `sphinx.environment.collectors.metadata.MetadataCollector.process_doc` + # Prune level to limit + self._level_to_elem = dict( + (section_level, section) + for section_level, section in self._level_to_elem.items() + if section_level <= level + ) - So technically the values should be parsed to AST, but this is redundant, - since `process_doc` just converts them back to text. + def renderInlineAsText(self, tokens: List[Token]) -> str: + """Special kludge for image `alt` attributes to conform CommonMark spec. + Don't try to use it! Spec requires to show `alt` content with stripped markup, + instead of simple escaping. """ - try: - data = token.get_data() - except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error: - msg_node = self.reporter.error( - "Front matter block:\n" + str(error), line=token.position.line_start - ) - msg_node += nodes.literal_block(token.content, token.content) - self.current_node += [msg_node] - return + result = "" - docinfo = dict_to_docinfo(data) - self.current_node.append(docinfo) + for token in tokens or []: + if token.type == "text": + result += token.content + # elif token.type == "image": + # result += self.renderInlineAsText(token.children) + else: + result += self.renderInlineAsText(token.children) - def render_footnote(self, token: block_tokens_ext.Footnote): - footnote = nodes.footnote() - self.add_line_and_source_path(footnote, token) - # footnote += nodes.label('', token.target) - footnote["names"].append(token.target) - footnote["auto"] = 1 - self.document.note_autofootnote(footnote) - self.document.note_explicit_target(footnote, footnote) - # TODO for now we wrap the content (which are list of spans tokens) - # in a paragraph, but eventually upstream in mistletoe this will already be - # block level tokens - self.current_node.append(footnote) - paragraph = nodes.paragraph("") - self.add_line_and_source_path(paragraph, token) - footnote.append(paragraph) - with self.current_node_context(paragraph, append=False): - self.render_children(token) + return result - def render_foot_reference(self, token): - """Footnote references are added as auto-numbered, - .i.e. `[^a]` is read as rST `[#a]_` - """ - refnode = nodes.footnote_reference("[^{}]".format(token.target)) - self.add_line_and_source_path(refnode, token) - refnode["auto"] = 1 - refnode["refname"] = token.target - # refnode += nodes.Text(token.target) - self.document.note_autofootnote_ref(refnode) - self.document.note_footnote_ref(refnode) - self.current_node.append(refnode) + # ### render methods for commonmark tokens - def render_paragraph(self, token): - if len(token.children) == 1 and isinstance( - token.children[0], myst_span_tokens.Target - ): - # promote the target to block level - return self.render_target(token.children[0]) + def render_paragraph_open(self, token): para = nodes.paragraph("") self.add_line_and_source_path(para, token) with self.current_node_context(para, append=True): self.render_children(token) - def render_line_comment(self, token): - self.current_node.append(nodes.comment(token.content, token.content)) + def render_inline(self, token): + self.render_children(token) - def render_target(self, token): - text = token.children[0].content - name = nodes.fully_normalize_name(text) - target = nodes.target(text) - target["names"].append(name) - self.add_line_and_source_path(target, token) - self.document.note_explicit_target(target, self.current_node) - self.current_node.append(target) + def render_text(self, token): + self.current_node.append(nodes.Text(token.content, token.content)) - def render_raw_text(self, token): - text = token.content - self.current_node.append(nodes.Text(text, text)) + def render_bullet_list_open(self, token): + list_node = nodes.bullet_list() + self.add_line_and_source_path(list_node, token) + with self.current_node_context(list_node, append=True): + self.render_children(token) - def render_escape_sequence(self, token): - text = token.children[0].content - self.current_node.append(nodes.Text(text, text)) + def render_ordered_list_open(self, token): + list_node = nodes.enumerated_list() + self.add_line_and_source_path(list_node, token) + with self.current_node_context(list_node, append=True): + self.render_children(token) - def render_line_break(self, token): - if token.soft: - self.current_node.append(nodes.Text("\n")) - else: - self.current_node.append(nodes.raw("", "
\n", format="html")) + def render_list_item_open(self, token): + item_node = nodes.list_item() + self.add_line_and_source_path(item_node, token) + with self.current_node_context(item_node, append=True): + self.render_children(token) - def render_strong(self, token): - node = nodes.strong() + def render_em_open(self, token): + node = nodes.emphasis() self.add_line_and_source_path(node, token) with self.current_node_context(node, append=True): self.render_children(token) - def render_emphasis(self, token): - node = nodes.emphasis() + def render_softbreak(self, token): + self.current_node.append(nodes.Text("\n")) + + def render_strong_open(self, token): + node = nodes.strong() self.add_line_and_source_path(node, token) with self.current_node_context(node, append=True): self.render_children(token) - def render_quote(self, token): + def render_blockquote_open(self, token): quote = nodes.block_quote() self.add_line_and_source_path(quote, token) with self.current_node_context(quote, append=True): self.render_children(token) - def render_strikethrough(self, token): - # TODO there is no existing node/role for this - raise NotImplementedError - - def render_thematic_break(self, token): + def render_hr(self, token): node = nodes.transition() self.add_line_and_source_path(node, token) self.current_node.append(node) - def render_block_break(self, token): - block_break = nodes.comment(token.content, token.content) - block_break["classes"] += ["block_break"] - self.add_line_and_source_path(block_break, token) - self.current_node.append(block_break) - - def render_math(self, token): - if token.content.startswith("$$"): - content = token.content[2:-2] - node = nodes.math_block(content, content, nowrap=False, number=None) - else: - content = token.content[1:-1] - node = nodes.math(content, content) + def render_code_inline(self, token): + node = nodes.literal(token.content, token.content) self.add_line_and_source_path(node, token) self.current_node.append(node) - def render_block_code(self, token): - # this should never have a language, since it is just indented text, however, - # creating a literal_block with no language will raise a warning in sphinx - text = token.children[0].content - language = token.language or "none" - node = nodes.literal_block(text, text, language=language) - self.add_line_and_source_path(node, token) - self.current_node.append(node) + def render_fence(self, token): + text = token.content + language = token.info.split()[0] if token.info else "" - def render_code_fence(self, token): - if token.language.startswith("{") and token.language.endswith("}"): + if language.startswith("{") and language.endswith("}"): return self.render_directive(token) - text = token.children[0].content - language = token.language if not language: try: sphinx_env = self.document.settings.env @@ -344,36 +310,12 @@ def render_code_fence(self, token): self.add_line_and_source_path(node, token) self.current_node.append(node) - def render_inline_code(self, token): - text = token.children[0].content - node = nodes.literal(text, text) - self.add_line_and_source_path(node, token) - self.current_node.append(node) - - def _is_section_level(self, level, section): - return self._level_to_elem.get(level, None) == section - - def _add_section(self, section, level): - parent_level = max( - section_level - for section_level in self._level_to_elem - if level > section_level - ) - parent = self._level_to_elem[parent_level] - parent.append(section) - self._level_to_elem[level] = section - - # Prune level to limit - self._level_to_elem = dict( - (section_level, section) - for section_level, section in self._level_to_elem.items() - if section_level <= level - ) - - def render_heading(self, token): + def render_heading_open(self, token): # Test if we're replacing a section level first + + level = int(token.tag[1]) if isinstance(self.current_node, nodes.section): - if self._is_section_level(token.level, self.current_node): + if self.is_section_level(level, self.current_node): self.current_node = self.current_node.parent title_node = nodes.title() @@ -383,7 +325,9 @@ def render_heading(self, token): self.add_line_and_source_path(new_section, token) new_section.append(title_node) - self._add_section(new_section, token.level) + # TODO add extra virtual section for non-consecutive levels + # (e.g. 1 to 3) or raise warning? + self.add_section(new_section, level) self.current_node = title_node self.render_children(token) @@ -398,21 +342,16 @@ def render_heading(self, token): self.document.note_implicit_target(section, section) self.current_node = section - def handle_cross_reference(self, token, destination): - # TODO use the docutils error reporting mechanisms, rather than raising - if not self.config.get("ignore_missing_refs", False): - raise NotImplementedError( - "reference not found in current document: {}\n{}".format( - destination, token - ) - ) + def render_link_open(self, token): + if token.markup == "autolink": + return self.render_autolink(token) - def render_link(self, token): ref_node = nodes.reference() self.add_line_and_source_path(ref_node, token) # Check destination is supported for cross-linking and remove extension # TODO escape urls? - destination = token.target + destination = token.attrGet("href") + title = token.attrGet("title") _, ext = splitext(destination) # TODO check for other supported extensions, such as those specified in # the Sphinx conf.py file but how to access this information? @@ -422,8 +361,9 @@ def render_link(self, token): # if ext.replace('.', '') in self.supported: # destination = destination.replace(ext, '') ref_node["refuri"] = destination - if token.title: - ref_node["title"] = token.title + + if title: + ref_node["title"] = title next_node = ref_node url_check = urlparse(destination) @@ -443,116 +383,150 @@ def render_link(self, token): with self.current_node_context(ref_node): self.render_children(token) + def handle_cross_reference(self, token, destination): + # TODO use the docutils error reporting mechanisms, rather than raising + if not self.config.get("ignore_missing_refs", False): + raise NotImplementedError( + "reference not found in current document: {} (lines: {})".format( + destination, token.map + ) + ) + + def render_autolink(self, token): + refuri = target = escapeHtml(token.attrGet("href")) + ref_node = nodes.reference(target, target, refuri=refuri) + self.add_line_and_source_path(ref_node, token) + self.current_node.append(ref_node) + + def render_html_inline(self, token): + self.current_node.append(nodes.raw("", token.content, format="html")) + + def render_html_block(self, token): + self.current_node.append(nodes.raw("", token.content, format="html")) + def render_image(self, token): img_node = nodes.image() self.add_line_and_source_path(img_node, token) - img_node["uri"] = token.src - - img_node["alt"] = "" - if token.children and isinstance(token.children[0], span_tokens.RawText): - img_node["alt"] = token.children[0].content - token.children[0].content = "" + img_node["uri"] = token.attrGet("src") + # TODO ideally we would render proper markup here + img_node["alt"] = self.renderInlineAsText(token.children) self.current_node.append(img_node) - # TODO how should non-raw alternative text be handled? - # with self.set_current_node(img_node): - # self.render_children(token) - - def render_list(self, token): - list_node = None - if token.start_at is not None: - list_node = nodes.enumerated_list() - # TODO deal with token.start_at? - # TODO support numerals/letters for lists - # (see https://stackoverflow.com/a/48372856/5033292) - # See docutils/docutils/parsers/rst/states.py:Body.enumerator - # list_node['enumtype'] = 'arabic', 'loweralpha', 'upperroman', etc. - # list_node['start'] - # list_node['prefix'] - # list_node['suffix'] - else: - list_node = nodes.bullet_list() - # TODO deal with token.loose? - self.add_line_and_source_path(list_node, token) - self.current_node.append(list_node) - with self.current_node_context(list_node): - self.render_children(token) + # ### render methods for plugin tokens - def render_list_item(self, token: myst_block_tokens.ListItem): - item_node = nodes.list_item() - self.add_line_and_source_path(item_node, token) - self.current_node.append(item_node) - with self.current_node_context(item_node): - self.render_children(token) + def render_front_matter(self, token): + """Pass document front matter data - def render_table(self, token): - table = nodes.table() - table["classes"] += ["colwidths-auto"] - # TODO column alignment - maxcols = max(len(c.children) for c in token.children) - # TODO are colwidths actually required - colwidths = [100 / maxcols] * maxcols - tgroup = nodes.tgroup(cols=len(colwidths)) - table += tgroup - for colwidth in colwidths: - colspec = nodes.colspec(colwidth=colwidth) - tgroup += colspec - - if hasattr(token, "header"): - thead = nodes.thead() - tgroup += thead - with self.current_node_context(thead): - self.render_table_row(token.header) - - tbody = nodes.tbody() - tgroup += tbody - - with self.current_node_context(tbody): - self.render_children(token) + For RST, all field lists are captured by + ``docutils.docutils.parsers.rst.states.Body.field_marker``, + then, if one occurs at the document, it is transformed by + `docutils.docutils.transforms.frontmatter.DocInfo`, and finally + this is intercepted by sphinx and added to the env in + `sphinx.environment.collectors.metadata.MetadataCollector.process_doc` - self.current_node.append(table) + So technically the values should be parsed to AST, but this is redundant, + since `process_doc` just converts them back to text. - def render_table_row(self, token): - row = nodes.row() - with self.current_node_context(row, append=True): - self.render_children(token) + """ + try: + data = yaml.safe_load(token.content) + except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error: + msg_node = self.reporter.error( + "Front matter block:\n" + str(error), line=token.map[0] + 1 + ) + msg_node += nodes.literal_block(token.content, token.content) + self.current_node += [msg_node] + return + + docinfo = dict_to_docinfo(data) + self.current_node.append(docinfo) + + # def render_table_open(self, token): + # # print(token) + # # raise + + # table = nodes.table() + # table["classes"] += ["colwidths-auto"] + # self.add_line_and_source_path(table, token) + + # thead = nodes.thead() + # # TODO there can never be more than one header row (at least in mardown-it) + # header = token.children[0].children[0] + # for hrow in header.children: + # nodes.t + # style = hrow.attrGet("style") + + # tgroup = nodes.tgroup(cols) + # table += tgroup + # tgroup += thead + + def render_math_inline(self, token): + content = token.content + node = nodes.math(content, content) + self.add_line_and_source_path(node, token) + self.current_node.append(node) + + def render_math_block(self, token): + content = token.content + node = nodes.math_block(content, content, nowrap=False, number=None) + self.add_line_and_source_path(node, token) + self.current_node.append(node) + + def render_footnote_ref(self, token): + """Footnote references are added as auto-numbered, + .i.e. `[^a]` is read as rST `[#a]_` + """ + # TODO we now also have ^[a] the inline version (currently disabled) + # that would be rendered here + target = token.meta["label"] + refnode = nodes.footnote_reference("[^{}]".format(target)) + self.add_line_and_source_path(refnode, token) + refnode["auto"] = 1 + refnode["refname"] = target + # refnode += nodes.Text(token.target) + self.document.note_autofootnote_ref(refnode) + self.document.note_footnote_ref(refnode) + self.current_node.append(refnode) - def render_table_cell(self, token): - entry = nodes.entry() - with self.current_node_context(entry, append=True): + def render_footnote_reference_open(self, token): + target = token.meta["label"] + footnote = nodes.footnote() + self.add_line_and_source_path(footnote, token) + footnote["names"].append(target) + footnote["auto"] = 1 + self.document.note_autofootnote(footnote) + self.document.note_explicit_target(footnote, footnote) + with self.current_node_context(footnote, append=True): self.render_children(token) - def render_auto_link(self, token): - if token.mailto: - refuri = "mailto:{}".format(token.target) - else: - refuri = escape_url(token.target) - ref_node = nodes.reference(token.target, token.target, refuri=refuri) - self.add_line_and_source_path(ref_node, token) - self.current_node.append(ref_node) + def render_myst_block_break(self, token): + block_break = nodes.comment(token.content, token.content) + block_break["classes"] += ["block_break"] + self.add_line_and_source_path(block_break, token) + self.current_node.append(block_break) - def render_html_span(self, token): - self.current_node.append(nodes.raw("", token.content, format="html")) + def render_myst_target(self, token): + text = token.content + name = nodes.fully_normalize_name(text) + target = nodes.target(text) + target["names"].append(name) + self.add_line_and_source_path(target, token) + self.document.note_explicit_target(target, self.current_node) + self.current_node.append(target) - def render_html_block(self, token): - self.current_node.append(nodes.raw("", token.content, format="html")) + def render_myst_line_comment(self, token): + self.current_node.append(nodes.comment(token.content, token.content)) - def render_role(self, token): - content = token.children[0].content - name = token.role_name - # TODO role name white/black lists - try: - lineno = token.position.line_start - except (AttributeError, TypeError): - lineno = 0 - inliner = MockInliner(self, lineno) + def render_myst_role(self, token): + name = token.meta["name"] + text = token.content + rawsource = f":{name}:`{token.content}`" + lineno = token.map[0] if token.map else 0 role_func, messages = roles.role( name, self.language_module, lineno, self.reporter ) - rawsource = ":{}:`{}`".format(name, content) - # # backslash escapes converted to nulls (``\x00``) - text = span_tokens.EscapeSequence.strip(content) + inliner = MockInliner(self, lineno) if role_func: nodes, messages2 = role_func(name, rawsource, text, lineno, inliner) # return nodes, messages + messages2 @@ -561,16 +535,18 @@ def render_role(self, token): message = self.reporter.error( 'Unknown interpreted text role "{}".'.format(name), line=lineno ) - # return ([self.problematic(content, content, msg)], messages + [msg]) problematic = inliner.problematic(text, rawsource, message) self.current_node += problematic - def render_directive(self, token): + def render_directive(self, token: Token): """Render special fenced code blocks as directives.""" - name = token.language[1:-1] + first_line = token.info.split(maxsplit=1) + name = first_line[0][1:-1] + arguments = "" if len(first_line) == 1 else first_line[1] # TODO directive name white/black lists - content = token.children[0].content - self.document.current_line = token.position.line_start + content = token.content + position = token.map[0] + self.document.current_line = position # get directive class directive_class, messages = directives.directive( @@ -580,20 +556,20 @@ def render_directive(self, token): error = self.reporter.error( "Unknown directive type '{}'\n".format(name), # nodes.literal_block(content, content), - line=token.position.line_start, + line=position, ) self.current_node += [error] + messages return try: arguments, options, body_lines = parse_directive_text( - directive_class, token.arguments, content + directive_class, arguments, content ) except DirectiveParsingError as error: error = self.reporter.error( "Directive '{}':\n{}".format(name, error), nodes.literal_block(content, content), - line=token.position.line_start, + line=position, ) self.current_node += [error] return @@ -610,10 +586,8 @@ def render_directive(self, token): token=token, ) else: - state_machine = MockStateMachine(self, token.position.line_start) - state = MockState( - self, state_machine, token.position.line_start, token=token - ) + state_machine = MockStateMachine(self, position) + state = MockState(self, state_machine, position) directive_instance = directive_class( name=name, # the list of positional arguments @@ -623,7 +597,7 @@ def render_directive(self, token): # the directive content line by line content=StringList(body_lines, self.document["source"]), # the absolute line number of the first line of the directive - lineno=token.position.line_start, + lineno=position, # the line offset of the first line of the content content_offset=0, # TODO get content offset from `parse_directive_text` # a string containing the entire directive @@ -637,7 +611,7 @@ def render_directive(self, token): result = directive_instance.run() except DirectiveError as error: msg_node = self.reporter.system_message( - error.level, error.msg, line=token.position.line_start + error.level, error.msg, line=position ) msg_node += nodes.literal_block(content, content) result = [msg_node] @@ -647,7 +621,7 @@ def render_directive(self, token): name, exc.__class__.__name__, exc ), nodes.literal_block(content, content), - line=token.position.line_start, + line=position, ) self.current_node += [error] return @@ -663,577 +637,14 @@ def render_directive(self, token): self.current_node += result -class SphinxRenderer(DocutilsRenderer): - """A mistletoe renderer to populate (in-place) a `docutils.document` AST. - - This is sub-class of `DocutilsRenderer` that handles sphinx cross-referencing. - """ - - def __init__(self, *args, **kwargs): - """Intitalise SphinxRenderer - - :param load_sphinx_env: load a basic sphinx environment, - when using the renderer as a context manager outside if `sphinx-build` - :param sphinx_conf: a dictionary representation of the sphinx `conf.py` - :param sphinx_srcdir: a path to a source directory - (for example, can be used for `include` statements) - - To use this renderer in a 'standalone' fashion:: - - from myst_parser.block_tokens import Document - - with SphinxRenderer(load_sphinx_env=True, sphinx_conf={}) as renderer: - renderer.render(Document.read("source text")) - - """ - self.load_sphinx_env = kwargs.pop("load_sphinx_env", False) - self.sphinx_conf = kwargs.pop("sphinx_conf", None) - self.sphinx_srcdir = kwargs.pop("sphinx_srcdir", None) - super().__init__(*args, **kwargs) - - def handle_cross_reference(self, token, destination): - from sphinx import addnodes - - wrap_node = addnodes.pending_xref( - reftarget=unquote(destination), - reftype="any", - refdomain=None, # Added to enable cross-linking - refexplicit=len(token.children) > 0, - refwarn=True, - ) - self.add_line_and_source_path(wrap_node, token) - if token.title: - wrap_node["title"] = token.title - self.current_node.append(wrap_node) - text_node = nodes.literal("", "", classes=["xref", "any"]) - wrap_node.append(text_node) - with self.current_node_context(text_node): - self.render_children(token) - - def mock_sphinx_env(self, configuration=None, sourcedir=None): - """Create a minimimal Sphinx environment; - loading sphinx roles, directives, etc. - """ - from sphinx.application import builtin_extensions, Sphinx - from sphinx.config import Config - from sphinx.environment import BuildEnvironment - from sphinx.events import EventManager - from sphinx.project import Project - from sphinx.registry import SphinxComponentRegistry - from sphinx.util.tags import Tags - - class MockSphinx(Sphinx): - """Minimal sphinx init to load roles and directives.""" - - def __init__(self, confoverrides=None, srcdir=None): - self.extensions = {} - self.registry = SphinxComponentRegistry() - self.html_themes = {} - self.events = EventManager(self) - self.tags = Tags(None) - self.config = Config({}, confoverrides or {}) - self.config.pre_init_values() - self._init_i18n() - for extension in builtin_extensions: - self.registry.load_extension(self, extension) - # fresh env - self.doctreedir = None - self.srcdir = srcdir - self.confdir = None - self.outdir = None - self.project = Project(srcdir=srcdir, source_suffix=".md") - self.project.docnames = ["mock_docname"] - self.env = BuildEnvironment() - self.env.setup(self) - self.env.temp_data["docname"] = "mock_docname" - self.builder = None - - if not confoverrides: - return - - # this code is only required for more complex parsing with extensions - for extension in self.config.extensions: - self.setup_extension(extension) - buildername = "dummy" - self.preload_builder(buildername) - self.config.init_values() - self.events.emit("config-inited", self.config) - import tempfile - - with tempfile.TemporaryDirectory() as tempdir: - # creating a builder attempts to make the doctreedir - self.doctreedir = tempdir - self.builder = self.create_builder(buildername) - self.doctreedir = None - - app = MockSphinx(confoverrides=configuration, srcdir=sourcedir) - self.document.settings.env = app.env - return app - - def __enter__(self): - """If `load_sphinx_env=True`, we set up an environment, - to parse sphinx roles/directives, outside of a `sphinx-build`. - - This primarily copies the code in `sphinx.util.docutils.docutils_namespace` - and `sphinx.util.docutils.sphinx_domains`. - """ - if not self.load_sphinx_env: - return super().__enter__() - - # store currently loaded roles/directives, so we can revert on exit - self._directives = copy.copy(directives._directives) - self._roles = copy.copy(roles._roles) - # Monkey-patch directive and role dispatch, - # so that sphinx domain-specific markup takes precedence. - self._env = self.mock_sphinx_env( - configuration=self.sphinx_conf, sourcedir=self.sphinx_srcdir - ).env - from sphinx.util.docutils import sphinx_domains - - self._sphinx_domains = sphinx_domains(self._env) - self._sphinx_domains.enable() - - return super().__enter__() - - def __exit__(self, exception_type, exception_val, traceback): - if not self.load_sphinx_env: - return super().__exit__(exception_type, exception_val, traceback) - # revert loaded roles/directives - directives._directives = self._directives - roles._roles = self._roles - self._directives = None - self._roles = None - # unregister nodes (see `sphinx.util.docutils.docutils_namespace`) - from sphinx.util.docutils import additional_nodes, unregister_node - - for node in list(additional_nodes): - unregister_node(node) - additional_nodes.discard(node) - # revert directive/role function (see `sphinx.util.docutils.sphinx_domains`) - self._sphinx_domains.disable() - self._sphinx_domains = None - self._env = None - return super().__exit__(exception_type, exception_val, traceback) - - -class MockingError(Exception): - """An exception to signal an error during mocking of docutils components.""" - - -class MockInliner: - """A mock version of `docutils.parsers.rst.states.Inliner`. - - This is parsed to role functions. - """ - - def __init__(self, renderer: DocutilsRenderer, lineno: int): - self._renderer = renderer - self.document = renderer.document - self.reporter = renderer.document.reporter - if not hasattr(self.reporter, "get_source_and_line"): - # TODO this is called by some roles, - # but I can't see how that would work in RST? - self.reporter.get_source_and_line = lambda l: (self.document["source"], l) - self.parent = renderer.current_node - self.language = renderer.language_module - self.rfc_url = "rfc%d.html" - - def problematic(self, text: str, rawsource: str, message: nodes.system_message): - msgid = self.document.set_id(message, self.parent) - problematic = nodes.problematic(rawsource, rawsource, refid=msgid) - prbid = self.document.set_id(problematic) - message.add_backref(prbid) - return problematic - - # TODO add parse method - - def __getattr__(self, name): - """This method is only be called if the attribute requested has not - been defined. Defined attributes will not be overridden. - """ - # TODO use document.reporter mechanism? - if hasattr(Inliner, name): - msg = "{cls} has not yet implemented attribute '{name}'".format( - cls=type(self).__name__, name=name - ) - raise MockingError(msg).with_traceback(sys.exc_info()[2]) - msg = "{cls} has no attribute {name}".format(cls=type(self).__name__, name=name) - raise MockingError(msg).with_traceback(sys.exc_info()[2]) - - -class MockState: - """A mock version of `docutils.parsers.rst.states.RSTState`. - - This is parsed to the `Directives.run()` method, - so that they may run nested parses on their content that will be parsed as markdown, - rather than RST. - """ - - def __init__( - self, - renderer: DocutilsRenderer, - state_machine: "MockStateMachine", - lineno: int, - token, - ): - self._renderer = renderer - self._lineno = lineno - self._token = token - self.document = renderer.document - self.state_machine = state_machine - - class Struct: - document = self.document - reporter = self.document.reporter - language = self.document.settings.language_code - title_styles = [] - section_level = max(renderer._level_to_elem) - section_bubble_up_kludge = False - inliner = MockInliner(renderer, lineno) - - self.memo = Struct - - def nested_parse( - self, - block: StringList, - input_offset: int, - node: nodes.Element, - match_titles: bool = False, - state_machine_class=None, - state_machine_kwargs=None, - ): - current_match_titles = self.state_machine.match_titles - self.state_machine.match_titles = match_titles - with self._renderer.current_node_context(node): - self._renderer.nested_render_text( - block, self._lineno + input_offset, token=self._token - ) - self.state_machine.match_titles = current_match_titles - - def inline_text(self, text: str, lineno: int): - # TODO return messages? - messages = [] - paragraph = nodes.paragraph("") - # here we instatiate a new renderer, - # so that the nested parse does not effect the current renderer, - # but we use the same global parse context, so that link references, etc - # are added to the global parse. - renderer = self._renderer.__class__( - document=self.document, - current_node=paragraph, - parse_context=get_parse_context(), - ) - lines = SourceLines( - text, - start_line=self._lineno, - uri=self.document["source"], - metadata=self._token.position.data, - standardize_ends=True, - ) - doc_token = myst_block_tokens.Document.read( - lines, front_matter=False, reset_definitions=False - ) - # we mark the token as nested so that footnotes etc aren't rendered - doc_token.is_nested = True - renderer.render(doc_token) - textnodes = [] - if paragraph.children: - # first child should be paragraph - textnodes = paragraph.children[0].children - return textnodes, messages - - # U+2014 is an em-dash: - attribution_pattern = re.compile("^((?:---?(?!-)|\u2014) *)(.+)") - - def block_quote(self, lines: List[str], line_offset: int): - """Parse a block quote, which is a block of text, - followed by an (optional) attribution. - - :: - - No matter where you go, there you are. - - -- Buckaroo Banzai - """ - elements = [] - # split attribution - last_line_blank = False - blockquote_lines = lines - attribution_lines = [] - attribution_line_offset = None - # First line after a blank line must begin with a dash - for i, line in enumerate(lines): - if not line.strip(): - last_line_blank = True - continue - if not last_line_blank: - last_line_blank = False - continue - last_line_blank = False - match = self.attribution_pattern.match(line) - if not match: - continue - attribution_line_offset = i - attribution_lines = [match.group(2)] - for at_line in lines[i + 1 :]: - indented_line = at_line[len(match.group(1)) :] - if len(indented_line) != len(at_line.lstrip()): - break - attribution_lines.append(indented_line) - blockquote_lines = lines[:i] - break - # parse block - blockquote = nodes.block_quote() - self.nested_parse(blockquote_lines, line_offset, blockquote) - elements.append(blockquote) - # parse attribution - if attribution_lines: - attribution_text = "\n".join(attribution_lines) - lineno = self._lineno + line_offset + attribution_line_offset - textnodes, messages = self.inline_text(attribution_text, lineno) - attribution = nodes.attribution(attribution_text, "", *textnodes) - ( - attribution.source, - attribution.line, - ) = self.state_machine.get_source_and_line(lineno) - blockquote += attribution - elements += messages - return elements - - def build_table(self, tabledata, tableline, stub_columns=0, widths=None): - return Body.build_table(self, tabledata, tableline, stub_columns, widths) - - def build_table_row(self, rowdata, tableline): - return Body.build_table_row(self, rowdata, tableline) - - def __getattr__(self, name): - """This method is only be called if the attribute requested has not - been defined. Defined attributes will not be overridden. - """ - if hasattr(Body, name): - msg = "{cls} has not yet implemented attribute '{name}'".format( - cls=type(self).__name__, name=name - ) - raise MockingError(msg).with_traceback(sys.exc_info()[2]) - msg = "{cls} has no attribute {name}".format(cls=type(self).__name__, name=name) - raise MockingError(msg).with_traceback(sys.exc_info()[2]) - - -class MockStateMachine: - """A mock version of `docutils.parsers.rst.states.RSTStateMachine`. - - This is parsed to the `Directives.run()` method. - """ - - def __init__(self, renderer: DocutilsRenderer, lineno: int): - self._renderer = renderer - self._lineno = lineno - self.document = renderer.document - self.reporter = self.document.reporter - self.node = renderer.current_node - self.match_titles = True - - # TODO to allow to access like attributes like input_lines, - # we would need to store the input lines, - # probably via the `Document` token, - # and maybe self._lines = lines[:], then for AstRenderer, - # ignore private attributes - - def get_source(self, lineno: Optional[int] = None): - """Return document source path.""" - return self.document["source"] - - def get_source_and_line(self, lineno: Optional[int] = None): - """Return (source path, line) tuple for current or given line number.""" - return self.document["source"], lineno or self._lineno - - def __getattr__(self, name): - """This method is only be called if the attribute requested has not - been defined. Defined attributes will not be overridden. - """ - if hasattr(RSTStateMachine, name): - msg = "{cls} has not yet implemented attribute '{name}'".format( - cls=type(self).__name__, name=name - ) - raise MockingError(msg).with_traceback(sys.exc_info()[2]) - msg = "{cls} has no attribute {name}".format(cls=type(self).__name__, name=name) - raise MockingError(msg).with_traceback(sys.exc_info()[2]) - - -class MockIncludeDirective: - """This directive uses a lot of statemachine logic that is not yet mocked. - Therefore, we treat it as a special case (at least for now). - - See: - https://docutils.sourceforge.io/docs/ref/rst/directives.html#including-an-external-document-fragment - """ - - def __init__( - self, - renderer: DocutilsRenderer, - name: str, - klass: Include, - arguments: list, - options: dict, - body: List[str], - token, - ): - self.renderer = renderer - self.document = renderer.document - self.name = name - self.klass = klass - self.arguments = arguments - self.options = options - self.body = body - self.lineno = token.position.line_start - self.token = token - - def run(self): - - from docutils.parsers.rst.directives.body import CodeBlock, NumberLines - - if not self.document.settings.file_insertion_enabled: - raise DirectiveError(2, 'Directive "{}" disabled.'.format(self.name)) - - source_dir = Path(self.document["source"]).absolute().parent - include_arg = "".join([s.strip() for s in self.arguments[0].splitlines()]) - - if include_arg.startswith("<") and include_arg.endswith(">"): - # # docutils "standard" includes - path = Path(self.klass.standard_include_path).joinpath(include_arg[1:-1]) - else: - # if using sphinx interpret absolute paths "correctly", - # i.e. relative to source directory - try: - sphinx_env = self.document.settings.env - _, include_arg = sphinx_env.relfn2path(self.arguments[0]) - sphinx_env.note_included(include_arg) - except AttributeError: - pass - path = Path(include_arg) - path = source_dir.joinpath(path) - - # read file - encoding = self.options.get("encoding", self.document.settings.input_encoding) - error_handler = self.document.settings.input_encoding_error_handler - # tab_width = self.options.get("tab-width", self.document.settings.tab_width) - try: - file_content = path.read_text(encoding=encoding, errors=error_handler) - except Exception as error: - raise DirectiveError( - 4, - 'Directive "{}": error reading file: {}\n{error}.'.format( - self.name, path, error - ), - ) - - # get required section of text - startline = self.options.get("start-line", None) - endline = self.options.get("end-line", None) - file_content = "\n".join(file_content.splitlines()[startline:endline]) - startline = startline or 0 - for split_on_type in ["start-after", "end-before"]: - split_on = self.options.get(split_on_type, None) - if not split_on: - continue - split_index = file_content.find(split_on) - if split_index < 0: - raise DirectiveError( - 4, - 'Directive "{}"; option "{}": text not found "{}".'.format( - self.name, split_on_type, split_on - ), - ) - if split_on_type == "start-after": - startline += split_index + len(split_on) - file_content = file_content[split_index + len(split_on) :] - else: - file_content = file_content[:split_index] - - if "literal" in self.options: - literal_block = nodes.literal_block( - file_content, source=str(path), classes=self.options.get("class", []) - ) - literal_block.line = 1 # TODO don;t think this should be 1? - self.add_name(literal_block) - if "number-lines" in self.options: - try: - startline = int(self.options["number-lines"] or 1) - except ValueError: - raise DirectiveError( - 3, ":number-lines: with non-integer " "start value" - ) - endline = startline + len(file_content.splitlines()) - if file_content.endswith("\n"): - file_content = file_content[:-1] - tokens = NumberLines([([], file_content)], startline, endline) - for classes, value in tokens: - if classes: - literal_block += nodes.inline(value, value, classes=classes) - else: - literal_block += nodes.Text(value) - else: - literal_block += nodes.Text(file_content) - return [literal_block] - if "code" in self.options: - self.options["source"] = str(path) - state_machine = MockStateMachine(self.renderer, self.lineno) - state = MockState(self.renderer, state_machine, self.lineno, self.token) - codeblock = CodeBlock( - name=self.name, - arguments=[self.options.pop("code")], - options=self.options, - content=file_content.splitlines(), - lineno=self.lineno, - content_offset=0, - block_text=file_content, - state=state, - state_machine=state_machine, - ) - return codeblock.run() - - # Here we perform a nested render, but temporarily setup the document/reporter - # with the correct document path and lineno for the included file. - source = self.renderer.document["source"] - rsource = self.renderer.reporter.source - line_func = getattr(self.renderer.reporter, "get_source_and_line", None) - try: - self.renderer.document["source"] = str(path) - self.renderer.reporter.source = str(path) - self.renderer.reporter.get_source_and_line = lambda l: (str(path), l) - self.renderer.nested_render_text(file_content, startline, token=self.token) - finally: - self.renderer.document["source"] = source - self.renderer.reporter.source = rsource - if line_func is not None: - self.renderer.reporter.get_source_and_line = line_func - else: - del self.renderer.reporter.get_source_and_line - return [] - - def add_name(self, node): - """Append self.options['name'] to node['names'] if it exists. - - Also normalize the name string and register it as explicit target. - """ - if "name" in self.options: - name = nodes.fully_normalize_name(self.options.pop("name")) - if "name" in node: - del node["name"] - node["names"].append(name) - self.renderer.document.note_explicit_target(node, node) - - def dict_to_docinfo(data): """Render a key/val pair as a docutils field node.""" # TODO this data could be used to support default option values for directives docinfo = nodes.docinfo() - # Throw away all non-stringy values - # TODO: support more complex data structures as values for key, value in data.items(): if not isinstance(value, (str, int, float)): - continue + value = json.dumps(value) value = str(value) field_node = nodes.field() field_node.source = value diff --git a/myst_parser/html_renderer.py b/myst_parser/html_renderer.py deleted file mode 100644 index 8214493f..00000000 --- a/myst_parser/html_renderer.py +++ /dev/null @@ -1,139 +0,0 @@ -from typing import Optional - -from mistletoe import block_tokens, block_tokens_ext, span_tokens, span_tokens_ext -from mistletoe.parse_context import ParseContext -from mistletoe.renderers import html as html_renderer - -from myst_parser.block_tokens import LineComment, BlockBreak, Quote, Paragraph, List -from myst_parser.span_tokens import Role, Target - - -class HTMLRenderer(html_renderer.HTMLRenderer): - """This HTML render uses the uses the MyST spec block and span tokens. - - It is used to test compliance with the commonmark spec, - and can be used for basic previews, - but does not run roles/directives, resolve cross-references etc... - """ - - default_block_tokens = ( - block_tokens.HTMLBlock, - LineComment, - block_tokens.BlockCode, - block_tokens.Heading, - Quote, - block_tokens.CodeFence, - block_tokens.ThematicBreak, - BlockBreak, - List, - block_tokens_ext.Table, - block_tokens_ext.Footnote, - block_tokens.LinkDefinition, - Paragraph, - ) - - default_span_tokens = ( - span_tokens.EscapeSequence, - Role, - span_tokens.HTMLSpan, - span_tokens.AutoLink, - Target, - span_tokens.CoreTokens, - span_tokens_ext.FootReference, - span_tokens_ext.Math, - # TODO there is no matching core element in docutils for strikethrough - # span_tokens_ext.Strikethrough, - span_tokens.InlineCode, - span_tokens.LineBreak, - span_tokens.RawText, - ) - - def __init__( - self, - parse_context: Optional[ParseContext] = None, - add_mathjax=False, - as_standalone=False, - add_css=None, - ): - """Intitalise HTML renderer - - :param parse_context: the parse context stores global parsing variables, - such as the block/span tokens to search for, - and link/footnote definitions that have been collected. - If None, a new context will be instatiated, with the default - block/span tokens for this renderer. - These will be re-instatiated on ``__enter__``. - :type parse_context: mistletoe.parse_context.ParseContext - :param add_mathjax: add the mathjax CDN - :param as_standalone: return the HTML body within a minmal HTML page - :param add_css: if as_standalone=True, CSS to add to the header - """ - super().__init__(parse_context=parse_context, as_standalone=False) - - self.mathjax_src = "" - if add_mathjax: - self.mathjax_src = ( - "\n' - ) - self._as_standalone = as_standalone - self._add_css = add_css - - def render_document(self, token): - """ - Optionally Append CDN link for MathJax to the end of . - """ - front_matter = "" - if token.front_matter: - front_matter = ( - '
' - '
{}
' - "
\n" - ).format(self.escape_html(token.front_matter.content)) - body = front_matter + super().render_document(token) + self.mathjax_src - if not self._as_standalone: - return body - return html_renderer.minimal_html_page(body, css=self._add_css or "") - - def render_code_fence(self, token): - if token.language and token.language.startswith("{"): - return self.render_directive(token) - return self.render_block_code(token) - - def render_directive(self, token): - return ( - '
\n' - "
{name} {args}\n{content}
\n" - "
" - ).format( - name=self.escape_html(token.language), - args=self.escape_html(token.arguments), - content=self.escape_html(token.children[0].content), - ) - - def render_line_comment(self, token): - return "".format(self.escape_html(token.content)) - - def render_block_break(self, token): - return '\n
'.format( - self.escape_html(token.content) - ) - - def render_target(self, token): - return ( - '({0})=' - ).format(self.escape_html(token.target)) - - def render_role(self, token): - return ('{{{0}}}{1}').format( - self.escape_html(token.role_name), self.render_raw_text(token.children[0]) - ) - - def render_math(self, token): - """ - Ensure Math tokens are all enclosed in two dollar signs. - """ - if token.content.startswith("$$"): - return self.render_raw_text(token) - return "${}$".format(self.render_raw_text(token)) diff --git a/myst_parser/json_renderer.py b/myst_parser/json_renderer.py deleted file mode 100644 index 0164fbbf..00000000 --- a/myst_parser/json_renderer.py +++ /dev/null @@ -1,43 +0,0 @@ -"""JSON renderer for myst.""" -from mistletoe import block_tokens, block_tokens_ext, span_tokens, span_tokens_ext -from mistletoe.renderers import json - -from myst_parser.block_tokens import LineComment, BlockBreak, Quote, Paragraph, List -from myst_parser.span_tokens import Role, Target - - -class JsonRenderer(json.JsonRenderer): - """This JSON render uses the MyST spec block and span tokens. - """ - - default_block_tokens = ( - block_tokens.HTMLBlock, - LineComment, - block_tokens.BlockCode, - block_tokens.Heading, - Quote, - block_tokens.CodeFence, - block_tokens.ThematicBreak, - BlockBreak, - List, - block_tokens_ext.Table, - block_tokens_ext.Footnote, - block_tokens.LinkDefinition, - Paragraph, - ) - - default_span_tokens = ( - span_tokens.EscapeSequence, - Role, - span_tokens.HTMLSpan, - span_tokens.AutoLink, - Target, - span_tokens.CoreTokens, - span_tokens_ext.FootReference, - span_tokens_ext.Math, - # TODO there is no matching core element in docutils for strikethrough - # span_tokens_ext.Strikethrough, - span_tokens.InlineCode, - span_tokens.LineBreak, - span_tokens.RawText, - ) diff --git a/myst_parser/main.py b/myst_parser/main.py new file mode 100644 index 00000000..acffe99a --- /dev/null +++ b/myst_parser/main.py @@ -0,0 +1,63 @@ +from markdown_it import MarkdownIt +from markdown_it.renderer import RendererHTML +from markdown_it.extensions.front_matter import front_matter_plugin +from markdown_it.extensions.myst_blocks import myst_block_plugin +from markdown_it.extensions.myst_role import myst_role_plugin +from markdown_it.extensions.texmath import texmath_plugin +from markdown_it.extensions.footnote import footnote_plugin + +from myst_parser.docutils_renderer import DocutilsRenderer +from myst_parser.docutils_renderer import make_document + + +def default_parser(renderer="sphinx") -> MarkdownIt: + from myst_parser.sphinx_renderer import SphinxRenderer + + renderers = { + "sphinx": SphinxRenderer, + "docutils": DocutilsRenderer, + "html": RendererHTML, + } + renderer_cls = renderers[renderer] + + md = ( + MarkdownIt("commonmark", renderer_cls=renderer_cls) + .enable("table") + .use(front_matter_plugin) + .use(myst_block_plugin) + .use(myst_role_plugin) + .use(texmath_plugin) + .use(footnote_plugin) + .disable("footnote_inline") + # disable this for now, because it need a new implementation in the renderer + .disable("footnote_tail") + # we don't want to yet remove un-referenced, because they may be referenced + # in admonition type directives + # we need to do our own post process to gather them + # (and also add nodes.transition() above) + ) + return md + + +def to_docutils(text, options=None, env=None, document=None, in_sphinx_env=False): + md = default_parser() + if options: + md.options.update(options) + md.options["document"] = document or make_document() + if in_sphinx_env: + from myst_parser.sphinx_renderer import mock_sphinx_env + + with mock_sphinx_env(document=md.options["document"]): + return md.render(text, env) + else: + return md.render(text, env) + + +def to_html(text, env=None): + md = default_parser("html") + return md.render(text, env) + + +def to_tokens(text, env=None): + md = default_parser() + return md.parse(text, env) diff --git a/myst_parser/mocking.py b/myst_parser/mocking.py new file mode 100644 index 00000000..06cda49f --- /dev/null +++ b/myst_parser/mocking.py @@ -0,0 +1,398 @@ +from pathlib import Path +import re +import sys +from typing import List, Optional + +from docutils import nodes +from docutils.parsers.rst.states import Inliner, RSTStateMachine, Body +from docutils.parsers.rst import DirectiveError +from docutils.parsers.rst.directives.misc import Include +from docutils.statemachine import StringList + + +class MockingError(Exception): + """An exception to signal an error during mocking of docutils components.""" + + +class MockInliner: + """A mock version of `docutils.parsers.rst.states.Inliner`. + + This is parsed to role functions. + """ + + def __init__(self, renderer, lineno: int): + self._renderer = renderer + self.document = renderer.document + self.reporter = renderer.document.reporter + if not hasattr(self.reporter, "get_source_and_line"): + # TODO this is called by some roles, + # but I can't see how that would work in RST? + self.reporter.get_source_and_line = lambda l: (self.document["source"], l) + self.parent = renderer.current_node + self.language = renderer.language_module + self.rfc_url = "rfc%d.html" + + def problematic(self, text: str, rawsource: str, message: nodes.system_message): + msgid = self.document.set_id(message, self.parent) + problematic = nodes.problematic(rawsource, rawsource, refid=msgid) + prbid = self.document.set_id(problematic) + message.add_backref(prbid) + return problematic + + # TODO add parse method + + def __getattr__(self, name): + """This method is only be called if the attribute requested has not + been defined. Defined attributes will not be overridden. + """ + # TODO use document.reporter mechanism? + if hasattr(Inliner, name): + msg = "{cls} has not yet implemented attribute '{name}'".format( + cls=type(self).__name__, name=name + ) + raise MockingError(msg).with_traceback(sys.exc_info()[2]) + msg = "{cls} has no attribute {name}".format(cls=type(self).__name__, name=name) + raise MockingError(msg).with_traceback(sys.exc_info()[2]) + + +class MockState: + """A mock version of `docutils.parsers.rst.states.RSTState`. + + This is parsed to the `Directives.run()` method, + so that they may run nested parses on their content that will be parsed as markdown, + rather than RST. + """ + + def __init__(self, renderer, state_machine: "MockStateMachine", lineno: int): + self._renderer = renderer + self._lineno = lineno + self.document = renderer.document + self.state_machine = state_machine + + class Struct: + document = self.document + reporter = self.document.reporter + language = self.document.settings.language_code + title_styles = [] + section_level = max(renderer._level_to_elem) + section_bubble_up_kludge = False + inliner = MockInliner(renderer, lineno) + + self.memo = Struct + + def nested_parse( + self, + block: StringList, + input_offset: int, + node: nodes.Element, + match_titles: bool = False, + state_machine_class=None, + state_machine_kwargs=None, + ): + current_match_titles = self.state_machine.match_titles + self.state_machine.match_titles = match_titles + with self._renderer.current_node_context(node): + self._renderer.nested_render_text( + "\n".join(block), self._lineno + input_offset + ) + self.state_machine.match_titles = current_match_titles + + def inline_text(self, text: str, lineno: int): + # TODO return messages? + messages = [] + paragraph = nodes.paragraph("") + + tokens = self._renderer.md.parseInline(text, self._renderer.env) + for token in tokens: + if token.map: + token.map = [token.map[0] + lineno, token.map[1] + lineno] + + # here we instantiate a new renderer, + # so that the nested parse does not effect the current renderer, + # but we use the same env, so that link references, etc + # are added to the global parse. + from myst_parser.docutils_renderer import DocutilsRenderer + + nested_renderer = DocutilsRenderer(self._renderer.md) + options = dict( + document=self.document, current_node=paragraph, output_footnotes=False + ) + nested_renderer.render(tokens, options, self._renderer.env) + return paragraph.children, messages + + # U+2014 is an em-dash: + attribution_pattern = re.compile("^((?:---?(?!-)|\u2014) *)(.+)") + + def block_quote(self, lines: List[str], line_offset: int): + """Parse a block quote, which is a block of text, + followed by an (optional) attribution. + + :: + + No matter where you go, there you are. + + -- Buckaroo Banzai + """ + elements = [] + # split attribution + last_line_blank = False + blockquote_lines = lines + attribution_lines = [] + attribution_line_offset = None + # First line after a blank line must begin with a dash + for i, line in enumerate(lines): + if not line.strip(): + last_line_blank = True + continue + if not last_line_blank: + last_line_blank = False + continue + last_line_blank = False + match = self.attribution_pattern.match(line) + if not match: + continue + attribution_line_offset = i + attribution_lines = [match.group(2)] + for at_line in lines[i + 1 :]: + indented_line = at_line[len(match.group(1)) :] + if len(indented_line) != len(at_line.lstrip()): + break + attribution_lines.append(indented_line) + blockquote_lines = lines[:i] + break + # parse block + blockquote = nodes.block_quote() + self.nested_parse(blockquote_lines, line_offset, blockquote) + elements.append(blockquote) + # parse attribution + if attribution_lines: + attribution_text = "\n".join(attribution_lines) + lineno = self._lineno + line_offset + attribution_line_offset + textnodes, messages = self.inline_text(attribution_text, lineno) + attribution = nodes.attribution(attribution_text, "", *textnodes) + ( + attribution.source, + attribution.line, + ) = self.state_machine.get_source_and_line(lineno) + blockquote += attribution + elements += messages + return elements + + def build_table(self, tabledata, tableline, stub_columns=0, widths=None): + return Body.build_table(self, tabledata, tableline, stub_columns, widths) + + def build_table_row(self, rowdata, tableline): + return Body.build_table_row(self, rowdata, tableline) + + def __getattr__(self, name): + """This method is only be called if the attribute requested has not + been defined. Defined attributes will not be overridden. + """ + if hasattr(Body, name): + msg = "{cls} has not yet implemented attribute '{name}'".format( + cls=type(self).__name__, name=name + ) + raise MockingError(msg).with_traceback(sys.exc_info()[2]) + msg = "{cls} has no attribute {name}".format(cls=type(self).__name__, name=name) + raise MockingError(msg).with_traceback(sys.exc_info()[2]) + + +class MockStateMachine: + """A mock version of `docutils.parsers.rst.states.RSTStateMachine`. + + This is parsed to the `Directives.run()` method. + """ + + def __init__(self, renderer, lineno: int): + self._renderer = renderer + self._lineno = lineno + self.document = renderer.document + self.reporter = self.document.reporter + self.node = renderer.current_node + self.match_titles = True + + def get_source(self, lineno: Optional[int] = None): + """Return document source path.""" + return self.document["source"] + + def get_source_and_line(self, lineno: Optional[int] = None): + """Return (source path, line) tuple for current or given line number.""" + return self.document["source"], lineno or self._lineno + + def __getattr__(self, name): + """This method is only be called if the attribute requested has not + been defined. Defined attributes will not be overridden. + """ + if hasattr(RSTStateMachine, name): + msg = "{cls} has not yet implemented attribute '{name}'".format( + cls=type(self).__name__, name=name + ) + raise MockingError(msg).with_traceback(sys.exc_info()[2]) + msg = "{cls} has no attribute {name}".format(cls=type(self).__name__, name=name) + raise MockingError(msg).with_traceback(sys.exc_info()[2]) + + +class MockIncludeDirective: + """This directive uses a lot of statemachine logic that is not yet mocked. + Therefore, we treat it as a special case (at least for now). + + See: + https://docutils.sourceforge.io/docs/ref/rst/directives.html#including-an-external-document-fragment + """ + + def __init__( + self, + renderer, + name: str, + klass: Include, + arguments: list, + options: dict, + body: List[str], + token, + ): + self.renderer = renderer + self.document = renderer.document + self.name = name + self.klass = klass + self.arguments = arguments + self.options = options + self.body = body + self.lineno = token.map[0] + self.token = token + + def run(self): + + from docutils.parsers.rst.directives.body import CodeBlock, NumberLines + + if not self.document.settings.file_insertion_enabled: + raise DirectiveError(2, 'Directive "{}" disabled.'.format(self.name)) + + source_dir = Path(self.document["source"]).absolute().parent + include_arg = "".join([s.strip() for s in self.arguments[0].splitlines()]) + + if include_arg.startswith("<") and include_arg.endswith(">"): + # # docutils "standard" includes + path = Path(self.klass.standard_include_path).joinpath(include_arg[1:-1]) + else: + # if using sphinx interpret absolute paths "correctly", + # i.e. relative to source directory + try: + sphinx_env = self.document.settings.env + _, include_arg = sphinx_env.relfn2path(self.arguments[0]) + sphinx_env.note_included(include_arg) + except AttributeError: + pass + path = Path(include_arg) + path = source_dir.joinpath(path) + + # read file + encoding = self.options.get("encoding", self.document.settings.input_encoding) + error_handler = self.document.settings.input_encoding_error_handler + # tab_width = self.options.get("tab-width", self.document.settings.tab_width) + try: + file_content = path.read_text(encoding=encoding, errors=error_handler) + except Exception as error: + raise DirectiveError( + 4, + 'Directive "{}": error reading file: {}\n{error}.'.format( + self.name, path, error + ), + ) + + # get required section of text + startline = self.options.get("start-line", None) + endline = self.options.get("end-line", None) + file_content = "\n".join(file_content.splitlines()[startline:endline]) + startline = startline or 0 + for split_on_type in ["start-after", "end-before"]: + split_on = self.options.get(split_on_type, None) + if not split_on: + continue + split_index = file_content.find(split_on) + if split_index < 0: + raise DirectiveError( + 4, + 'Directive "{}"; option "{}": text not found "{}".'.format( + self.name, split_on_type, split_on + ), + ) + if split_on_type == "start-after": + startline += split_index + len(split_on) + file_content = file_content[split_index + len(split_on) :] + else: + file_content = file_content[:split_index] + + if "literal" in self.options: + literal_block = nodes.literal_block( + file_content, source=str(path), classes=self.options.get("class", []) + ) + literal_block.line = 1 # TODO don;t think this should be 1? + self.add_name(literal_block) + if "number-lines" in self.options: + try: + startline = int(self.options["number-lines"] or 1) + except ValueError: + raise DirectiveError( + 3, ":number-lines: with non-integer " "start value" + ) + endline = startline + len(file_content.splitlines()) + if file_content.endswith("\n"): + file_content = file_content[:-1] + tokens = NumberLines([([], file_content)], startline, endline) + for classes, value in tokens: + if classes: + literal_block += nodes.inline(value, value, classes=classes) + else: + literal_block += nodes.Text(value) + else: + literal_block += nodes.Text(file_content) + return [literal_block] + if "code" in self.options: + self.options["source"] = str(path) + state_machine = MockStateMachine(self.renderer, self.lineno) + state = MockState(self.renderer, state_machine, self.lineno) + codeblock = CodeBlock( + name=self.name, + arguments=[self.options.pop("code")], + options=self.options, + content=file_content.splitlines(), + lineno=self.lineno, + content_offset=0, + block_text=file_content, + state=state, + state_machine=state_machine, + ) + return codeblock.run() + + # Here we perform a nested render, but temporarily setup the document/reporter + # with the correct document path and lineno for the included file. + source = self.renderer.document["source"] + rsource = self.renderer.reporter.source + line_func = getattr(self.renderer.reporter, "get_source_and_line", None) + try: + self.renderer.document["source"] = str(path) + self.renderer.reporter.source = str(path) + self.renderer.reporter.get_source_and_line = lambda l: (str(path), l) + self.renderer.nested_render_text( + file_content, startline, disable_front_matter=False + ) + finally: + self.renderer.document["source"] = source + self.renderer.reporter.source = rsource + if line_func is not None: + self.renderer.reporter.get_source_and_line = line_func + else: + del self.renderer.reporter.get_source_and_line + return [] + + def add_name(self, node): + """Append self.options['name'] to node['names'] if it exists. + + Also normalize the name string and register it as explicit target. + """ + if "name" in self.options: + name = nodes.fully_normalize_name(self.options.pop("name")) + if "name" in node: + del node["name"] + node["names"].append(name) + self.renderer.document.note_explicit_target(node, node) diff --git a/myst_parser/span_tokens.py b/myst_parser/span_tokens.py deleted file mode 100644 index 9d85d6bd..00000000 --- a/myst_parser/span_tokens.py +++ /dev/null @@ -1,66 +0,0 @@ -import re -from typing import Pattern - -import attr - -from mistletoe import span_tokens -from mistletoe.attr_doc import autodoc -from mistletoe.base_elements import Position - -__all__ = ("Role", "Target") - - -@autodoc -@attr.s(kw_only=True, slots=True) -class Role(span_tokens.SpanToken): - """ - Inline role tokens. ("{name}`some code`") - """ - - pattern = re.compile( - r"(? 0, + refwarn=True, + ) + self.add_line_and_source_path(wrap_node, token) + title = token.attrGet("title") + if title: + wrap_node["title"] = title + self.current_node.append(wrap_node) + text_node = nodes.literal("", "", classes=["xref", "any"]) + wrap_node.append(text_node) + with self.current_node_context(text_node): + self.render_children(token) + + +def minimal_sphinx_app(configuration=None, sourcedir=None): + """Create a minimal Sphinx environment; loading sphinx roles, directives, etc. + """ + + class MockSphinx(Sphinx): + """Minimal sphinx init to load roles and directives.""" + + def __init__(self, confoverrides=None, srcdir=None): + self.extensions = {} + self.registry = SphinxComponentRegistry() + self.html_themes = {} + self.events = EventManager(self) + self.tags = Tags(None) + self.config = Config({}, confoverrides or {}) + self.config.pre_init_values() + self._init_i18n() + for extension in builtin_extensions: + self.registry.load_extension(self, extension) + # fresh env + self.doctreedir = None + self.srcdir = srcdir + self.confdir = None + self.outdir = None + self.project = Project(srcdir=srcdir, source_suffix=".md") + self.project.docnames = ["mock_docname"] + self.env = BuildEnvironment() + self.env.setup(self) + self.env.temp_data["docname"] = "mock_docname" + self.builder = None + + if not confoverrides: + return + + # this code is only required for more complex parsing with extensions + for extension in self.config.extensions: + self.setup_extension(extension) + buildername = "dummy" + self.preload_builder(buildername) + self.config.init_values() + self.events.emit("config-inited", self.config) + import tempfile + + with tempfile.TemporaryDirectory() as tempdir: + # creating a builder attempts to make the doctreedir + self.doctreedir = tempdir + self.builder = self.create_builder(buildername) + self.doctreedir = None + + app = MockSphinx(confoverrides=configuration, srcdir=sourcedir) + return app + + +@contextmanager +def mock_sphinx_env(conf=None, srcdir=None, document=None): + """Set up an environment, to parse sphinx roles/directives, + outside of a `sphinx-build`. + + :param sphinx_conf: a dictionary representation of the sphinx `conf.py` + :param sphinx_srcdir: a path to a source directory + (for example, can be used for `include` statements) + + This primarily copies the code in `sphinx.util.docutils.docutils_namespace` + and `sphinx.util.docutils.sphinx_domains`. + """ + # store currently loaded roles/directives, so we can revert on exit + _directives = copy.copy(directives._directives) + _roles = copy.copy(roles._roles) + # Monkey-patch directive and role dispatch, + # so that sphinx domain-specific markup takes precedence. + app = minimal_sphinx_app(configuration=conf, sourcedir=srcdir) + _sphinx_domains = sphinx_domains(app.env) + _sphinx_domains.enable() + if document is not None: + document.settings.env = app.env + try: + yield app + finally: + # revert loaded roles/directives + directives._directives = _directives + roles._roles = _roles + # TODO unregister nodes (see `sphinx.util.docutils.docutils_namespace`) + for node in list(additional_nodes): + unregister_node(node) + additional_nodes.discard(node) + # revert directive/role function (see `sphinx.util.docutils.sphinx_domains`) + _sphinx_domains.disable() diff --git a/tests/test_commonmark/commonmark.json b/tests/test_commonmark/commonmark.json index 5fd6f667..222fee42 100644 --- a/tests/test_commonmark/commonmark.json +++ b/tests/test_commonmark/commonmark.json @@ -3,4992 +3,5192 @@ "markdown": "\tfoo\tbaz\t\tbim\n", "html": "
foo\tbaz\t\tbim\n
\n", "example": 1, - "start_line": 350, - "end_line": 355, + "start_line": 352, + "end_line": 357, "section": "Tabs" }, { "markdown": " \tfoo\tbaz\t\tbim\n", "html": "
foo\tbaz\t\tbim\n
\n", "example": 2, - "start_line": 357, - "end_line": 362, + "start_line": 359, + "end_line": 364, "section": "Tabs" }, { "markdown": " a\ta\n ὐ\ta\n", "html": "
a\ta\nὐ\ta\n
\n", "example": 3, - "start_line": 364, - "end_line": 371, + "start_line": 366, + "end_line": 373, "section": "Tabs" }, { "markdown": " - foo\n\n\tbar\n", "html": "\n", "example": 4, - "start_line": 377, - "end_line": 388, + "start_line": 379, + "end_line": 390, "section": "Tabs" }, { "markdown": "- foo\n\n\t\tbar\n", "html": "\n", "example": 5, - "start_line": 390, - "end_line": 402, + "start_line": 392, + "end_line": 404, "section": "Tabs" }, { "markdown": ">\t\tfoo\n", "html": "
\n
  foo\n
\n
\n", "example": 6, - "start_line": 413, - "end_line": 420, + "start_line": 415, + "end_line": 422, "section": "Tabs" }, { "markdown": "-\t\tfoo\n", "html": "\n", "example": 7, - "start_line": 422, - "end_line": 431, + "start_line": 424, + "end_line": 433, "section": "Tabs" }, { "markdown": " foo\n\tbar\n", "html": "
foo\nbar\n
\n", "example": 8, - "start_line": 434, - "end_line": 441, + "start_line": 436, + "end_line": 443, "section": "Tabs" }, { "markdown": " - foo\n - bar\n\t - baz\n", "html": "\n", "example": 9, - "start_line": 443, - "end_line": 459, + "start_line": 445, + "end_line": 461, "section": "Tabs" }, { "markdown": "#\tFoo\n", "html": "

Foo

\n", "example": 10, - "start_line": 461, - "end_line": 465, + "start_line": 463, + "end_line": 467, "section": "Tabs" }, { "markdown": "*\t*\t*\t\n", "html": "
\n", "example": 11, - "start_line": 467, - "end_line": 471, + "start_line": 469, + "end_line": 473, "section": "Tabs" }, { "markdown": "- `one\n- two`\n", "html": "\n", "example": 12, - "start_line": 494, - "end_line": 502, + "start_line": 496, + "end_line": 504, "section": "Precedence" }, { "markdown": "***\n---\n___\n", "html": "
\n
\n
\n", "example": 13, - "start_line": 533, - "end_line": 541, + "start_line": 535, + "end_line": 543, "section": "Thematic breaks" }, { "markdown": "+++\n", "html": "

+++

\n", "example": 14, - "start_line": 546, - "end_line": 550, + "start_line": 548, + "end_line": 552, "section": "Thematic breaks" }, { "markdown": "===\n", "html": "

===

\n", "example": 15, - "start_line": 553, - "end_line": 557, + "start_line": 555, + "end_line": 559, "section": "Thematic breaks" }, { "markdown": "--\n**\n__\n", "html": "

--\n**\n__

\n", "example": 16, - "start_line": 562, - "end_line": 570, + "start_line": 564, + "end_line": 572, "section": "Thematic breaks" }, { "markdown": " ***\n ***\n ***\n", "html": "
\n
\n
\n", "example": 17, - "start_line": 575, - "end_line": 583, + "start_line": 577, + "end_line": 585, "section": "Thematic breaks" }, { "markdown": " ***\n", "html": "
***\n
\n", "example": 18, - "start_line": 588, - "end_line": 593, + "start_line": 590, + "end_line": 595, "section": "Thematic breaks" }, { "markdown": "Foo\n ***\n", "html": "

Foo\n***

\n", "example": 19, - "start_line": 596, - "end_line": 602, + "start_line": 598, + "end_line": 604, "section": "Thematic breaks" }, { "markdown": "_____________________________________\n", "html": "
\n", "example": 20, - "start_line": 607, - "end_line": 611, + "start_line": 609, + "end_line": 613, "section": "Thematic breaks" }, { "markdown": " - - -\n", "html": "
\n", "example": 21, - "start_line": 616, - "end_line": 620, + "start_line": 618, + "end_line": 622, "section": "Thematic breaks" }, { "markdown": " ** * ** * ** * **\n", "html": "
\n", "example": 22, - "start_line": 623, - "end_line": 627, + "start_line": 625, + "end_line": 629, "section": "Thematic breaks" }, { "markdown": "- - - -\n", "html": "
\n", "example": 23, - "start_line": 630, - "end_line": 634, + "start_line": 632, + "end_line": 636, "section": "Thematic breaks" }, { "markdown": "- - - - \n", "html": "
\n", "example": 24, - "start_line": 639, - "end_line": 643, + "start_line": 641, + "end_line": 645, "section": "Thematic breaks" }, { "markdown": "_ _ _ _ a\n\na------\n\n---a---\n", "html": "

_ _ _ _ a

\n

a------

\n

---a---

\n", "example": 25, - "start_line": 648, - "end_line": 658, + "start_line": 650, + "end_line": 660, "section": "Thematic breaks" }, { "markdown": " *-*\n", "html": "

-

\n", "example": 26, - "start_line": 664, - "end_line": 668, + "start_line": 666, + "end_line": 670, "section": "Thematic breaks" }, { "markdown": "- foo\n***\n- bar\n", "html": "\n
\n\n", "example": 27, - "start_line": 673, - "end_line": 685, + "start_line": 675, + "end_line": 687, "section": "Thematic breaks" }, { "markdown": "Foo\n***\nbar\n", "html": "

Foo

\n
\n

bar

\n", "example": 28, - "start_line": 690, - "end_line": 698, + "start_line": 692, + "end_line": 700, "section": "Thematic breaks" }, { "markdown": "Foo\n---\nbar\n", "html": "

Foo

\n

bar

\n", "example": 29, - "start_line": 707, - "end_line": 714, + "start_line": 709, + "end_line": 716, "section": "Thematic breaks" }, { "markdown": "* Foo\n* * *\n* Bar\n", "html": "\n
\n\n", "example": 30, - "start_line": 720, - "end_line": 732, + "start_line": 722, + "end_line": 734, "section": "Thematic breaks" }, { "markdown": "- Foo\n- * * *\n", "html": "\n", "example": 31, - "start_line": 737, - "end_line": 747, + "start_line": 739, + "end_line": 749, "section": "Thematic breaks" }, { "markdown": "# foo\n## foo\n### foo\n#### foo\n##### foo\n###### foo\n", "html": "

foo

\n

foo

\n

foo

\n

foo

\n
foo
\n
foo
\n", "example": 32, - "start_line": 766, - "end_line": 780, + "start_line": 768, + "end_line": 782, "section": "ATX headings" }, { "markdown": "####### foo\n", "html": "

####### foo

\n", "example": 33, - "start_line": 785, - "end_line": 789, + "start_line": 787, + "end_line": 791, "section": "ATX headings" }, { "markdown": "#5 bolt\n\n#hashtag\n", "html": "

#5 bolt

\n

#hashtag

\n", "example": 34, - "start_line": 800, - "end_line": 807, + "start_line": 802, + "end_line": 809, "section": "ATX headings" }, { "markdown": "\\## foo\n", "html": "

## foo

\n", "example": 35, - "start_line": 812, - "end_line": 816, + "start_line": 814, + "end_line": 818, "section": "ATX headings" }, { "markdown": "# foo *bar* \\*baz\\*\n", "html": "

foo bar *baz*

\n", "example": 36, - "start_line": 821, - "end_line": 825, + "start_line": 823, + "end_line": 827, "section": "ATX headings" }, { "markdown": "# foo \n", "html": "

foo

\n", "example": 37, - "start_line": 830, - "end_line": 834, + "start_line": 832, + "end_line": 836, "section": "ATX headings" }, { "markdown": " ### foo\n ## foo\n # foo\n", "html": "

foo

\n

foo

\n

foo

\n", "example": 38, - "start_line": 839, - "end_line": 847, + "start_line": 841, + "end_line": 849, "section": "ATX headings" }, { "markdown": " # foo\n", "html": "
# foo\n
\n", "example": 39, - "start_line": 852, - "end_line": 857, + "start_line": 854, + "end_line": 859, "section": "ATX headings" }, { "markdown": "foo\n # bar\n", "html": "

foo\n# bar

\n", "example": 40, - "start_line": 860, - "end_line": 866, + "start_line": 862, + "end_line": 868, "section": "ATX headings" }, { "markdown": "## foo ##\n ### bar ###\n", "html": "

foo

\n

bar

\n", "example": 41, - "start_line": 871, - "end_line": 877, + "start_line": 873, + "end_line": 879, "section": "ATX headings" }, { "markdown": "# foo ##################################\n##### foo ##\n", "html": "

foo

\n
foo
\n", "example": 42, - "start_line": 882, - "end_line": 888, + "start_line": 884, + "end_line": 890, "section": "ATX headings" }, { "markdown": "### foo ### \n", "html": "

foo

\n", "example": 43, - "start_line": 893, - "end_line": 897, + "start_line": 895, + "end_line": 899, "section": "ATX headings" }, { "markdown": "### foo ### b\n", "html": "

foo ### b

\n", "example": 44, - "start_line": 904, - "end_line": 908, + "start_line": 906, + "end_line": 910, "section": "ATX headings" }, { "markdown": "# foo#\n", "html": "

foo#

\n", "example": 45, - "start_line": 913, - "end_line": 917, + "start_line": 915, + "end_line": 919, "section": "ATX headings" }, { "markdown": "### foo \\###\n## foo #\\##\n# foo \\#\n", "html": "

foo ###

\n

foo ###

\n

foo #

\n", "example": 46, - "start_line": 923, - "end_line": 931, + "start_line": 925, + "end_line": 933, "section": "ATX headings" }, { "markdown": "****\n## foo\n****\n", "html": "
\n

foo

\n
\n", "example": 47, - "start_line": 937, - "end_line": 945, + "start_line": 939, + "end_line": 947, "section": "ATX headings" }, { "markdown": "Foo bar\n# baz\nBar foo\n", "html": "

Foo bar

\n

baz

\n

Bar foo

\n", "example": 48, - "start_line": 948, - "end_line": 956, + "start_line": 950, + "end_line": 958, "section": "ATX headings" }, { "markdown": "## \n#\n### ###\n", "html": "

\n

\n

\n", "example": 49, - "start_line": 961, - "end_line": 969, + "start_line": 963, + "end_line": 971, "section": "ATX headings" }, { "markdown": "Foo *bar*\n=========\n\nFoo *bar*\n---------\n", "html": "

Foo bar

\n

Foo bar

\n", "example": 50, - "start_line": 1004, - "end_line": 1013, + "start_line": 1006, + "end_line": 1015, "section": "Setext headings" }, { "markdown": "Foo *bar\nbaz*\n====\n", "html": "

Foo bar\nbaz

\n", "example": 51, - "start_line": 1018, - "end_line": 1025, + "start_line": 1020, + "end_line": 1027, + "section": "Setext headings" + }, + { + "markdown": " Foo *bar\nbaz*\t\n====\n", + "html": "

Foo bar\nbaz

\n", + "example": 52, + "start_line": 1034, + "end_line": 1041, "section": "Setext headings" }, { "markdown": "Foo\n-------------------------\n\nFoo\n=\n", "html": "

Foo

\n

Foo

\n", - "example": 52, - "start_line": 1030, - "end_line": 1039, + "example": 53, + "start_line": 1046, + "end_line": 1055, "section": "Setext headings" }, { "markdown": " Foo\n---\n\n Foo\n-----\n\n Foo\n ===\n", "html": "

Foo

\n

Foo

\n

Foo

\n", - "example": 53, - "start_line": 1045, - "end_line": 1058, + "example": 54, + "start_line": 1061, + "end_line": 1074, "section": "Setext headings" }, { "markdown": " Foo\n ---\n\n Foo\n---\n", "html": "
Foo\n---\n\nFoo\n
\n
\n", - "example": 54, - "start_line": 1063, - "end_line": 1076, + "example": 55, + "start_line": 1079, + "end_line": 1092, "section": "Setext headings" }, { "markdown": "Foo\n ---- \n", "html": "

Foo

\n", - "example": 55, - "start_line": 1082, - "end_line": 1087, + "example": 56, + "start_line": 1098, + "end_line": 1103, "section": "Setext headings" }, { "markdown": "Foo\n ---\n", "html": "

Foo\n---

\n", - "example": 56, - "start_line": 1092, - "end_line": 1098, + "example": 57, + "start_line": 1108, + "end_line": 1114, "section": "Setext headings" }, { "markdown": "Foo\n= =\n\nFoo\n--- -\n", "html": "

Foo\n= =

\n

Foo

\n
\n", - "example": 57, - "start_line": 1103, - "end_line": 1114, + "example": 58, + "start_line": 1119, + "end_line": 1130, "section": "Setext headings" }, { "markdown": "Foo \n-----\n", "html": "

Foo

\n", - "example": 58, - "start_line": 1119, - "end_line": 1124, + "example": 59, + "start_line": 1135, + "end_line": 1140, "section": "Setext headings" }, { "markdown": "Foo\\\n----\n", "html": "

Foo\\

\n", - "example": 59, - "start_line": 1129, - "end_line": 1134, + "example": 60, + "start_line": 1145, + "end_line": 1150, "section": "Setext headings" }, { "markdown": "`Foo\n----\n`\n\n\n", "html": "

`Foo

\n

`

\n

<a title="a lot

\n

of dashes"/>

\n", - "example": 60, - "start_line": 1140, - "end_line": 1153, + "example": 61, + "start_line": 1156, + "end_line": 1169, "section": "Setext headings" }, { "markdown": "> Foo\n---\n", "html": "
\n

Foo

\n
\n
\n", - "example": 61, - "start_line": 1159, - "end_line": 1167, + "example": 62, + "start_line": 1175, + "end_line": 1183, "section": "Setext headings" }, { "markdown": "> foo\nbar\n===\n", "html": "
\n

foo\nbar\n===

\n
\n", - "example": 62, - "start_line": 1170, - "end_line": 1180, + "example": 63, + "start_line": 1186, + "end_line": 1196, "section": "Setext headings" }, { "markdown": "- Foo\n---\n", "html": "\n
\n", - "example": 63, - "start_line": 1183, - "end_line": 1191, + "example": 64, + "start_line": 1199, + "end_line": 1207, "section": "Setext headings" }, { "markdown": "Foo\nBar\n---\n", "html": "

Foo\nBar

\n", - "example": 64, - "start_line": 1198, - "end_line": 1205, + "example": 65, + "start_line": 1214, + "end_line": 1221, "section": "Setext headings" }, { "markdown": "---\nFoo\n---\nBar\n---\nBaz\n", "html": "
\n

Foo

\n

Bar

\n

Baz

\n", - "example": 65, - "start_line": 1211, - "end_line": 1223, + "example": 66, + "start_line": 1227, + "end_line": 1239, "section": "Setext headings" }, { "markdown": "\n====\n", "html": "

====

\n", - "example": 66, - "start_line": 1228, - "end_line": 1233, + "example": 67, + "start_line": 1244, + "end_line": 1249, "section": "Setext headings" }, { "markdown": "---\n---\n", "html": "
\n
\n", - "example": 67, - "start_line": 1240, - "end_line": 1246, + "example": 68, + "start_line": 1256, + "end_line": 1262, "section": "Setext headings" }, { "markdown": "- foo\n-----\n", "html": "\n
\n", - "example": 68, - "start_line": 1249, - "end_line": 1257, + "example": 69, + "start_line": 1265, + "end_line": 1273, "section": "Setext headings" }, { "markdown": " foo\n---\n", "html": "
foo\n
\n
\n", - "example": 69, - "start_line": 1260, - "end_line": 1267, + "example": 70, + "start_line": 1276, + "end_line": 1283, "section": "Setext headings" }, { "markdown": "> foo\n-----\n", "html": "
\n

foo

\n
\n
\n", - "example": 70, - "start_line": 1270, - "end_line": 1278, + "example": 71, + "start_line": 1286, + "end_line": 1294, "section": "Setext headings" }, { "markdown": "\\> foo\n------\n", "html": "

> foo

\n", - "example": 71, - "start_line": 1284, - "end_line": 1289, + "example": 72, + "start_line": 1300, + "end_line": 1305, "section": "Setext headings" }, { "markdown": "Foo\n\nbar\n---\nbaz\n", "html": "

Foo

\n

bar

\n

baz

\n", - "example": 72, - "start_line": 1315, - "end_line": 1325, + "example": 73, + "start_line": 1331, + "end_line": 1341, "section": "Setext headings" }, { "markdown": "Foo\nbar\n\n---\n\nbaz\n", "html": "

Foo\nbar

\n
\n

baz

\n", - "example": 73, - "start_line": 1331, - "end_line": 1343, + "example": 74, + "start_line": 1347, + "end_line": 1359, "section": "Setext headings" }, { "markdown": "Foo\nbar\n* * *\nbaz\n", "html": "

Foo\nbar

\n
\n

baz

\n", - "example": 74, - "start_line": 1349, - "end_line": 1359, + "example": 75, + "start_line": 1365, + "end_line": 1375, "section": "Setext headings" }, { "markdown": "Foo\nbar\n\\---\nbaz\n", "html": "

Foo\nbar\n---\nbaz

\n", - "example": 75, - "start_line": 1364, - "end_line": 1374, + "example": 76, + "start_line": 1380, + "end_line": 1390, "section": "Setext headings" }, { "markdown": " a simple\n indented code block\n", "html": "
a simple\n  indented code block\n
\n", - "example": 76, - "start_line": 1392, - "end_line": 1399, + "example": 77, + "start_line": 1408, + "end_line": 1415, "section": "Indented code blocks" }, { "markdown": " - foo\n\n bar\n", "html": "\n", - "example": 77, - "start_line": 1406, - "end_line": 1417, + "example": 78, + "start_line": 1422, + "end_line": 1433, "section": "Indented code blocks" }, { "markdown": "1. foo\n\n - bar\n", "html": "
    \n
  1. \n

    foo

    \n
      \n
    • bar
    • \n
    \n
  2. \n
\n", - "example": 78, - "start_line": 1420, - "end_line": 1433, + "example": 79, + "start_line": 1436, + "end_line": 1449, "section": "Indented code blocks" }, { "markdown": "
\n *hi*\n\n - one\n", "html": "
<a/>\n*hi*\n\n- one\n
\n", - "example": 79, - "start_line": 1440, - "end_line": 1451, + "example": 80, + "start_line": 1456, + "end_line": 1467, "section": "Indented code blocks" }, { "markdown": " chunk1\n\n chunk2\n \n \n \n chunk3\n", "html": "
chunk1\n\nchunk2\n\n\n\nchunk3\n
\n", - "example": 80, - "start_line": 1456, - "end_line": 1473, + "example": 81, + "start_line": 1472, + "end_line": 1489, "section": "Indented code blocks" }, { "markdown": " chunk1\n \n chunk2\n", "html": "
chunk1\n  \n  chunk2\n
\n", - "example": 81, - "start_line": 1479, - "end_line": 1488, + "example": 82, + "start_line": 1495, + "end_line": 1504, "section": "Indented code blocks" }, { "markdown": "Foo\n bar\n\n", "html": "

Foo\nbar

\n", - "example": 82, - "start_line": 1494, - "end_line": 1501, + "example": 83, + "start_line": 1510, + "end_line": 1517, "section": "Indented code blocks" }, { "markdown": " foo\nbar\n", "html": "
foo\n
\n

bar

\n", - "example": 83, - "start_line": 1508, - "end_line": 1515, + "example": 84, + "start_line": 1524, + "end_line": 1531, "section": "Indented code blocks" }, { "markdown": "# Heading\n foo\nHeading\n------\n foo\n----\n", "html": "

Heading

\n
foo\n
\n

Heading

\n
foo\n
\n
\n", - "example": 84, - "start_line": 1521, - "end_line": 1536, + "example": 85, + "start_line": 1537, + "end_line": 1552, "section": "Indented code blocks" }, { "markdown": " foo\n bar\n", "html": "
    foo\nbar\n
\n", - "example": 85, - "start_line": 1541, - "end_line": 1548, + "example": 86, + "start_line": 1557, + "end_line": 1564, "section": "Indented code blocks" }, { "markdown": "\n \n foo\n \n\n", "html": "
foo\n
\n", - "example": 86, - "start_line": 1554, - "end_line": 1563, + "example": 87, + "start_line": 1570, + "end_line": 1579, "section": "Indented code blocks" }, { "markdown": " foo \n", "html": "
foo  \n
\n", - "example": 87, - "start_line": 1568, - "end_line": 1573, + "example": 88, + "start_line": 1584, + "end_line": 1589, "section": "Indented code blocks" }, { "markdown": "```\n<\n >\n```\n", "html": "
<\n >\n
\n", - "example": 88, - "start_line": 1623, - "end_line": 1632, + "example": 89, + "start_line": 1639, + "end_line": 1648, "section": "Fenced code blocks" }, { "markdown": "~~~\n<\n >\n~~~\n", "html": "
<\n >\n
\n", - "example": 89, - "start_line": 1637, - "end_line": 1646, + "example": 90, + "start_line": 1653, + "end_line": 1662, "section": "Fenced code blocks" }, { "markdown": "``\nfoo\n``\n", "html": "

foo

\n", - "example": 90, - "start_line": 1650, - "end_line": 1656, + "example": 91, + "start_line": 1666, + "end_line": 1672, "section": "Fenced code blocks" }, { "markdown": "```\naaa\n~~~\n```\n", "html": "
aaa\n~~~\n
\n", - "example": 91, - "start_line": 1661, - "end_line": 1670, + "example": 92, + "start_line": 1677, + "end_line": 1686, "section": "Fenced code blocks" }, { "markdown": "~~~\naaa\n```\n~~~\n", "html": "
aaa\n```\n
\n", - "example": 92, - "start_line": 1673, - "end_line": 1682, + "example": 93, + "start_line": 1689, + "end_line": 1698, "section": "Fenced code blocks" }, { "markdown": "````\naaa\n```\n``````\n", "html": "
aaa\n```\n
\n", - "example": 93, - "start_line": 1687, - "end_line": 1696, + "example": 94, + "start_line": 1703, + "end_line": 1712, "section": "Fenced code blocks" }, { "markdown": "~~~~\naaa\n~~~\n~~~~\n", "html": "
aaa\n~~~\n
\n", - "example": 94, - "start_line": 1699, - "end_line": 1708, + "example": 95, + "start_line": 1715, + "end_line": 1724, "section": "Fenced code blocks" }, { "markdown": "```\n", "html": "
\n", - "example": 95, - "start_line": 1714, - "end_line": 1718, + "example": 96, + "start_line": 1730, + "end_line": 1734, "section": "Fenced code blocks" }, { "markdown": "`````\n\n```\naaa\n", "html": "
\n```\naaa\n
\n", - "example": 96, - "start_line": 1721, - "end_line": 1731, + "example": 97, + "start_line": 1737, + "end_line": 1747, "section": "Fenced code blocks" }, { "markdown": "> ```\n> aaa\n\nbbb\n", "html": "
\n
aaa\n
\n
\n

bbb

\n", - "example": 97, - "start_line": 1734, - "end_line": 1745, + "example": 98, + "start_line": 1750, + "end_line": 1761, "section": "Fenced code blocks" }, { "markdown": "```\n\n \n```\n", "html": "
\n  \n
\n", - "example": 98, - "start_line": 1750, - "end_line": 1759, + "example": 99, + "start_line": 1766, + "end_line": 1775, "section": "Fenced code blocks" }, { "markdown": "```\n```\n", "html": "
\n", - "example": 99, - "start_line": 1764, - "end_line": 1769, + "example": 100, + "start_line": 1780, + "end_line": 1785, "section": "Fenced code blocks" }, { "markdown": " ```\n aaa\naaa\n```\n", "html": "
aaa\naaa\n
\n", - "example": 100, - "start_line": 1776, - "end_line": 1785, + "example": 101, + "start_line": 1792, + "end_line": 1801, "section": "Fenced code blocks" }, { "markdown": " ```\naaa\n aaa\naaa\n ```\n", "html": "
aaa\naaa\naaa\n
\n", - "example": 101, - "start_line": 1788, - "end_line": 1799, + "example": 102, + "start_line": 1804, + "end_line": 1815, "section": "Fenced code blocks" }, { "markdown": " ```\n aaa\n aaa\n aaa\n ```\n", "html": "
aaa\n aaa\naaa\n
\n", - "example": 102, - "start_line": 1802, - "end_line": 1813, + "example": 103, + "start_line": 1818, + "end_line": 1829, "section": "Fenced code blocks" }, { "markdown": " ```\n aaa\n ```\n", "html": "
```\naaa\n```\n
\n", - "example": 103, - "start_line": 1818, - "end_line": 1827, + "example": 104, + "start_line": 1834, + "end_line": 1843, "section": "Fenced code blocks" }, { "markdown": "```\naaa\n ```\n", "html": "
aaa\n
\n", - "example": 104, - "start_line": 1833, - "end_line": 1840, + "example": 105, + "start_line": 1849, + "end_line": 1856, "section": "Fenced code blocks" }, { "markdown": " ```\naaa\n ```\n", "html": "
aaa\n
\n", - "example": 105, - "start_line": 1843, - "end_line": 1850, + "example": 106, + "start_line": 1859, + "end_line": 1866, "section": "Fenced code blocks" }, { "markdown": "```\naaa\n ```\n", "html": "
aaa\n    ```\n
\n", - "example": 106, - "start_line": 1855, - "end_line": 1863, + "example": 107, + "start_line": 1871, + "end_line": 1879, "section": "Fenced code blocks" }, { "markdown": "``` ```\naaa\n", - "html": "

\naaa

\n", - "example": 107, - "start_line": 1869, - "end_line": 1875, + "html": "

\naaa

\n", + "example": 108, + "start_line": 1885, + "end_line": 1891, "section": "Fenced code blocks" }, { "markdown": "~~~~~~\naaa\n~~~ ~~\n", "html": "
aaa\n~~~ ~~\n
\n", - "example": 108, - "start_line": 1878, - "end_line": 1886, + "example": 109, + "start_line": 1894, + "end_line": 1902, "section": "Fenced code blocks" }, { "markdown": "foo\n```\nbar\n```\nbaz\n", "html": "

foo

\n
bar\n
\n

baz

\n", - "example": 109, - "start_line": 1892, - "end_line": 1903, + "example": 110, + "start_line": 1908, + "end_line": 1919, "section": "Fenced code blocks" }, { "markdown": "foo\n---\n~~~\nbar\n~~~\n# baz\n", "html": "

foo

\n
bar\n
\n

baz

\n", - "example": 110, - "start_line": 1909, - "end_line": 1921, + "example": 111, + "start_line": 1925, + "end_line": 1937, "section": "Fenced code blocks" }, { "markdown": "```ruby\ndef foo(x)\n return 3\nend\n```\n", "html": "
def foo(x)\n  return 3\nend\n
\n", - "example": 111, - "start_line": 1929, - "end_line": 1940, + "example": 112, + "start_line": 1947, + "end_line": 1958, "section": "Fenced code blocks" }, { "markdown": "~~~~ ruby startline=3 $%@#$\ndef foo(x)\n return 3\nend\n~~~~~~~\n", "html": "
def foo(x)\n  return 3\nend\n
\n", - "example": 112, - "start_line": 1943, - "end_line": 1954, + "example": 113, + "start_line": 1961, + "end_line": 1972, "section": "Fenced code blocks" }, { "markdown": "````;\n````\n", "html": "
\n", - "example": 113, - "start_line": 1957, - "end_line": 1962, + "example": 114, + "start_line": 1975, + "end_line": 1980, "section": "Fenced code blocks" }, { "markdown": "``` aa ```\nfoo\n", "html": "

aa\nfoo

\n", - "example": 114, - "start_line": 1967, - "end_line": 1973, + "example": 115, + "start_line": 1985, + "end_line": 1991, + "section": "Fenced code blocks" + }, + { + "markdown": "~~~ aa ``` ~~~\nfoo\n~~~\n", + "html": "
foo\n
\n", + "example": 116, + "start_line": 1996, + "end_line": 2003, "section": "Fenced code blocks" }, { "markdown": "```\n``` aaa\n```\n", "html": "
``` aaa\n
\n", - "example": 115, - "start_line": 1978, - "end_line": 1985, + "example": 117, + "start_line": 2008, + "end_line": 2015, "section": "Fenced code blocks" }, { "markdown": "
\n
\n**Hello**,\n\n_world_.\n
\n
\n", "html": "
\n
\n**Hello**,\n

world.\n

\n
\n", - "example": 116, - "start_line": 2055, - "end_line": 2070, + "example": 118, + "start_line": 2087, + "end_line": 2102, "section": "HTML blocks" }, { "markdown": "\n \n \n \n
\n hi\n
\n\nokay.\n", "html": "\n \n \n \n
\n hi\n
\n

okay.

\n", - "example": 117, - "start_line": 2084, - "end_line": 2103, + "example": 119, + "start_line": 2116, + "end_line": 2135, "section": "HTML blocks" }, { "markdown": "
\n *hello*\n \n", "html": "
\n *hello*\n \n", - "example": 118, - "start_line": 2106, - "end_line": 2114, + "example": 120, + "start_line": 2138, + "end_line": 2146, "section": "HTML blocks" }, { "markdown": "
\n*foo*\n", "html": "
\n*foo*\n", - "example": 119, - "start_line": 2119, - "end_line": 2125, + "example": 121, + "start_line": 2151, + "end_line": 2157, "section": "HTML blocks" }, { "markdown": "
\n\n*Markdown*\n\n
\n", "html": "
\n

Markdown

\n
\n", - "example": 120, - "start_line": 2130, - "end_line": 2140, + "example": 122, + "start_line": 2162, + "end_line": 2172, "section": "HTML blocks" }, { "markdown": "
\n
\n", "html": "
\n
\n", - "example": 121, - "start_line": 2146, - "end_line": 2154, + "example": 123, + "start_line": 2178, + "end_line": 2186, "section": "HTML blocks" }, { "markdown": "
\n
\n", "html": "
\n
\n", - "example": 122, - "start_line": 2157, - "end_line": 2165, + "example": 124, + "start_line": 2189, + "end_line": 2197, "section": "HTML blocks" }, { "markdown": "
\n*foo*\n\n*bar*\n", "html": "
\n*foo*\n

bar

\n", - "example": 123, - "start_line": 2169, - "end_line": 2178, + "example": 125, + "start_line": 2201, + "end_line": 2210, "section": "HTML blocks" }, { "markdown": "
\n", "html": "
*foo*
\n", - "example": 127, - "start_line": 2218, - "end_line": 2222, + "example": 129, + "start_line": 2250, + "end_line": 2254, "section": "HTML blocks" }, { "markdown": "
\nfoo\n
\n", "html": "
\nfoo\n
\n", - "example": 128, - "start_line": 2225, - "end_line": 2233, + "example": 130, + "start_line": 2257, + "end_line": 2265, "section": "HTML blocks" }, { "markdown": "
\n``` c\nint x = 33;\n```\n", "html": "
\n``` c\nint x = 33;\n```\n", - "example": 129, - "start_line": 2242, - "end_line": 2252, + "example": 131, + "start_line": 2274, + "end_line": 2284, "section": "HTML blocks" }, { "markdown": "\n*bar*\n\n", "html": "\n*bar*\n\n", - "example": 130, - "start_line": 2259, - "end_line": 2267, + "example": 132, + "start_line": 2291, + "end_line": 2299, "section": "HTML blocks" }, { "markdown": "\n*bar*\n\n", "html": "\n*bar*\n\n", - "example": 131, - "start_line": 2272, - "end_line": 2280, + "example": 133, + "start_line": 2304, + "end_line": 2312, "section": "HTML blocks" }, { "markdown": "\n*bar*\n\n", "html": "\n*bar*\n\n", - "example": 132, - "start_line": 2283, - "end_line": 2291, + "example": 134, + "start_line": 2315, + "end_line": 2323, "section": "HTML blocks" }, { "markdown": "\n*bar*\n", "html": "\n*bar*\n", - "example": 133, - "start_line": 2294, - "end_line": 2300, + "example": 135, + "start_line": 2326, + "end_line": 2332, "section": "HTML blocks" }, { "markdown": "\n*foo*\n\n", "html": "\n*foo*\n\n", - "example": 134, - "start_line": 2309, - "end_line": 2317, + "example": 136, + "start_line": 2341, + "end_line": 2349, "section": "HTML blocks" }, { "markdown": "\n\n*foo*\n\n\n", "html": "\n

foo

\n
\n", - "example": 135, - "start_line": 2324, - "end_line": 2334, + "example": 137, + "start_line": 2356, + "end_line": 2366, "section": "HTML blocks" }, { "markdown": "*foo*\n", "html": "

foo

\n", - "example": 136, - "start_line": 2342, - "end_line": 2346, + "example": 138, + "start_line": 2374, + "end_line": 2378, "section": "HTML blocks" }, { "markdown": "
\nimport Text.HTML.TagSoup\n\nmain :: IO ()\nmain = print $ parseTags tags\n
\nokay\n", "html": "
\nimport Text.HTML.TagSoup\n\nmain :: IO ()\nmain = print $ parseTags tags\n
\n

okay

\n", - "example": 137, - "start_line": 2358, - "end_line": 2374, + "example": 139, + "start_line": 2390, + "end_line": 2406, "section": "HTML blocks" }, { "markdown": "\nokay\n", "html": "\n

okay

\n", - "example": 138, - "start_line": 2379, - "end_line": 2393, + "example": 140, + "start_line": 2411, + "end_line": 2425, "section": "HTML blocks" }, { "markdown": "\nh1 {color:red;}\n\np {color:blue;}\n\nokay\n", "html": "\nh1 {color:red;}\n\np {color:blue;}\n\n

okay

\n", - "example": 139, - "start_line": 2398, - "end_line": 2414, + "example": 141, + "start_line": 2430, + "end_line": 2446, "section": "HTML blocks" }, { "markdown": "\n\nfoo\n", "html": "\n\nfoo\n", - "example": 140, - "start_line": 2421, - "end_line": 2431, + "example": 142, + "start_line": 2453, + "end_line": 2463, "section": "HTML blocks" }, { "markdown": ">
\n> foo\n\nbar\n", "html": "
\n
\nfoo\n
\n

bar

\n", - "example": 141, - "start_line": 2434, - "end_line": 2445, + "example": 143, + "start_line": 2466, + "end_line": 2477, "section": "HTML blocks" }, { "markdown": "-
\n- foo\n", "html": "
    \n
  • \n
    \n
  • \n
  • foo
  • \n
\n", - "example": 142, - "start_line": 2448, - "end_line": 2458, + "example": 144, + "start_line": 2480, + "end_line": 2490, "section": "HTML blocks" }, { "markdown": "\n*foo*\n", "html": "\n

foo

\n", - "example": 143, - "start_line": 2463, - "end_line": 2469, + "example": 145, + "start_line": 2495, + "end_line": 2501, "section": "HTML blocks" }, { "markdown": "*bar*\n*baz*\n", "html": "*bar*\n

baz

\n", - "example": 144, - "start_line": 2472, - "end_line": 2478, + "example": 146, + "start_line": 2504, + "end_line": 2510, "section": "HTML blocks" }, { "markdown": "1. *bar*\n", "html": "1. *bar*\n", - "example": 145, - "start_line": 2484, - "end_line": 2492, + "example": 147, + "start_line": 2516, + "end_line": 2524, "section": "HTML blocks" }, { "markdown": "\nokay\n", "html": "\n

okay

\n", - "example": 146, - "start_line": 2497, - "end_line": 2509, + "example": 148, + "start_line": 2529, + "end_line": 2541, "section": "HTML blocks" }, { "markdown": "';\n\n?>\nokay\n", "html": "';\n\n?>\n

okay

\n", - "example": 147, - "start_line": 2515, - "end_line": 2529, + "example": 149, + "start_line": 2547, + "end_line": 2561, "section": "HTML blocks" }, { "markdown": "\n", "html": "\n", - "example": 148, - "start_line": 2534, - "end_line": 2538, + "example": 150, + "start_line": 2566, + "end_line": 2570, "section": "HTML blocks" }, { "markdown": "\nokay\n", "html": "\n

okay

\n", - "example": 149, - "start_line": 2543, - "end_line": 2571, + "example": 151, + "start_line": 2575, + "end_line": 2603, "section": "HTML blocks" }, { "markdown": " \n\n \n", "html": " \n
<!-- foo -->\n
\n", - "example": 150, - "start_line": 2576, - "end_line": 2584, + "example": 152, + "start_line": 2608, + "end_line": 2616, "section": "HTML blocks" }, { "markdown": "
\n\n
\n", "html": "
\n
<div>\n
\n", - "example": 151, - "start_line": 2587, - "end_line": 2595, + "example": 153, + "start_line": 2619, + "end_line": 2627, "section": "HTML blocks" }, { "markdown": "Foo\n
\nbar\n
\n", "html": "

Foo

\n
\nbar\n
\n", - "example": 152, - "start_line": 2601, - "end_line": 2611, + "example": 154, + "start_line": 2633, + "end_line": 2643, "section": "HTML blocks" }, { "markdown": "
\nbar\n
\n*foo*\n", "html": "
\nbar\n
\n*foo*\n", - "example": 153, - "start_line": 2617, - "end_line": 2627, + "example": 155, + "start_line": 2650, + "end_line": 2660, "section": "HTML blocks" }, { "markdown": "Foo\n\nbaz\n", "html": "

Foo\n\nbaz

\n", - "example": 154, - "start_line": 2632, - "end_line": 2640, + "example": 156, + "start_line": 2665, + "end_line": 2673, "section": "HTML blocks" }, { "markdown": "
\n\n*Emphasized* text.\n\n
\n", "html": "
\n

Emphasized text.

\n
\n", - "example": 155, - "start_line": 2673, - "end_line": 2683, + "example": 157, + "start_line": 2706, + "end_line": 2716, "section": "HTML blocks" }, { "markdown": "
\n*Emphasized* text.\n
\n", "html": "
\n*Emphasized* text.\n
\n", - "example": 156, - "start_line": 2686, - "end_line": 2694, + "example": 158, + "start_line": 2719, + "end_line": 2727, "section": "HTML blocks" }, { "markdown": "\n\n\n\n\n\n\n\n
\nHi\n
\n", "html": "\n\n\n\n
\nHi\n
\n", - "example": 157, - "start_line": 2708, - "end_line": 2728, + "example": 159, + "start_line": 2741, + "end_line": 2761, "section": "HTML blocks" }, { "markdown": "\n\n \n\n \n\n \n\n
\n Hi\n
\n", "html": "\n \n
<td>\n  Hi\n</td>\n
\n \n
\n", - "example": 158, - "start_line": 2735, - "end_line": 2756, + "example": 160, + "start_line": 2768, + "end_line": 2789, "section": "HTML blocks" }, { "markdown": "[foo]: /url \"title\"\n\n[foo]\n", "html": "

foo

\n", - "example": 159, - "start_line": 2783, - "end_line": 2789, + "example": 161, + "start_line": 2816, + "end_line": 2822, "section": "Link reference definitions" }, { "markdown": " [foo]: \n /url \n 'the title' \n\n[foo]\n", "html": "

foo

\n", - "example": 160, - "start_line": 2792, - "end_line": 2800, + "example": 162, + "start_line": 2825, + "end_line": 2833, "section": "Link reference definitions" }, { "markdown": "[Foo*bar\\]]:my_(url) 'title (with parens)'\n\n[Foo*bar\\]]\n", "html": "

Foo*bar]

\n", - "example": 161, - "start_line": 2803, - "end_line": 2809, + "example": 163, + "start_line": 2836, + "end_line": 2842, "section": "Link reference definitions" }, { - "markdown": "[Foo bar]:\n\n'title'\n\n[Foo bar]\n", + "markdown": "[Foo bar]:\n\n'title'\n\n[Foo bar]\n", "html": "

Foo bar

\n", - "example": 162, - "start_line": 2812, - "end_line": 2820, + "example": 164, + "start_line": 2845, + "end_line": 2853, "section": "Link reference definitions" }, { "markdown": "[foo]: /url '\ntitle\nline1\nline2\n'\n\n[foo]\n", "html": "

foo

\n", - "example": 163, - "start_line": 2825, - "end_line": 2839, + "example": 165, + "start_line": 2858, + "end_line": 2872, "section": "Link reference definitions" }, { "markdown": "[foo]: /url 'title\n\nwith blank line'\n\n[foo]\n", "html": "

[foo]: /url 'title

\n

with blank line'

\n

[foo]

\n", - "example": 164, - "start_line": 2844, - "end_line": 2854, + "example": 166, + "start_line": 2877, + "end_line": 2887, "section": "Link reference definitions" }, { "markdown": "[foo]:\n/url\n\n[foo]\n", "html": "

foo

\n", - "example": 165, - "start_line": 2859, - "end_line": 2866, + "example": 167, + "start_line": 2892, + "end_line": 2899, "section": "Link reference definitions" }, { "markdown": "[foo]:\n\n[foo]\n", "html": "

[foo]:

\n

[foo]

\n", - "example": 166, - "start_line": 2871, - "end_line": 2878, + "example": 168, + "start_line": 2904, + "end_line": 2911, + "section": "Link reference definitions" + }, + { + "markdown": "[foo]: <>\n\n[foo]\n", + "html": "

foo

\n", + "example": 169, + "start_line": 2916, + "end_line": 2922, + "section": "Link reference definitions" + }, + { + "markdown": "[foo]: (baz)\n\n[foo]\n", + "html": "

[foo]: (baz)

\n

[foo]

\n", + "example": 170, + "start_line": 2927, + "end_line": 2934, "section": "Link reference definitions" }, { "markdown": "[foo]: /url\\bar\\*baz \"foo\\\"bar\\baz\"\n\n[foo]\n", "html": "

foo

\n", - "example": 167, - "start_line": 2884, - "end_line": 2890, + "example": 171, + "start_line": 2940, + "end_line": 2946, "section": "Link reference definitions" }, { "markdown": "[foo]\n\n[foo]: url\n", "html": "

foo

\n", - "example": 168, - "start_line": 2895, - "end_line": 2901, + "example": 172, + "start_line": 2951, + "end_line": 2957, "section": "Link reference definitions" }, { "markdown": "[foo]\n\n[foo]: first\n[foo]: second\n", "html": "

foo

\n", - "example": 169, - "start_line": 2907, - "end_line": 2914, + "example": 173, + "start_line": 2963, + "end_line": 2970, "section": "Link reference definitions" }, { "markdown": "[FOO]: /url\n\n[Foo]\n", "html": "

Foo

\n", - "example": 170, - "start_line": 2920, - "end_line": 2926, + "example": 174, + "start_line": 2976, + "end_line": 2982, "section": "Link reference definitions" }, { "markdown": "[ΑΓΩ]: /φου\n\n[αγω]\n", "html": "

αγω

\n", - "example": 171, - "start_line": 2929, - "end_line": 2935, + "example": 175, + "start_line": 2985, + "end_line": 2991, "section": "Link reference definitions" }, { "markdown": "[foo]: /url\n", "html": "", - "example": 172, - "start_line": 2941, - "end_line": 2944, + "example": 176, + "start_line": 2997, + "end_line": 3000, "section": "Link reference definitions" }, { "markdown": "[\nfoo\n]: /url\nbar\n", "html": "

bar

\n", - "example": 173, - "start_line": 2949, - "end_line": 2956, + "example": 177, + "start_line": 3005, + "end_line": 3012, "section": "Link reference definitions" }, { "markdown": "[foo]: /url \"title\" ok\n", "html": "

[foo]: /url "title" ok

\n", - "example": 174, - "start_line": 2962, - "end_line": 2966, + "example": 178, + "start_line": 3018, + "end_line": 3022, "section": "Link reference definitions" }, { "markdown": "[foo]: /url\n\"title\" ok\n", "html": "

"title" ok

\n", - "example": 175, - "start_line": 2971, - "end_line": 2976, + "example": 179, + "start_line": 3027, + "end_line": 3032, "section": "Link reference definitions" }, { "markdown": " [foo]: /url \"title\"\n\n[foo]\n", "html": "
[foo]: /url "title"\n
\n

[foo]

\n", - "example": 176, - "start_line": 2982, - "end_line": 2990, + "example": 180, + "start_line": 3038, + "end_line": 3046, "section": "Link reference definitions" }, { "markdown": "```\n[foo]: /url\n```\n\n[foo]\n", "html": "
[foo]: /url\n
\n

[foo]

\n", - "example": 177, - "start_line": 2996, - "end_line": 3006, + "example": 181, + "start_line": 3052, + "end_line": 3062, "section": "Link reference definitions" }, { "markdown": "Foo\n[bar]: /baz\n\n[bar]\n", "html": "

Foo\n[bar]: /baz

\n

[bar]

\n", - "example": 178, - "start_line": 3011, - "end_line": 3020, + "example": 182, + "start_line": 3067, + "end_line": 3076, "section": "Link reference definitions" }, { "markdown": "# [Foo]\n[foo]: /url\n> bar\n", "html": "

Foo

\n
\n

bar

\n
\n", - "example": 179, - "start_line": 3026, - "end_line": 3035, + "example": 183, + "start_line": 3082, + "end_line": 3091, + "section": "Link reference definitions" + }, + { + "markdown": "[foo]: /url\nbar\n===\n[foo]\n", + "html": "

bar

\n

foo

\n", + "example": 184, + "start_line": 3093, + "end_line": 3101, + "section": "Link reference definitions" + }, + { + "markdown": "[foo]: /url\n===\n[foo]\n", + "html": "

===\nfoo

\n", + "example": 185, + "start_line": 3103, + "end_line": 3110, "section": "Link reference definitions" }, { "markdown": "[foo]: /foo-url \"foo\"\n[bar]: /bar-url\n \"bar\"\n[baz]: /baz-url\n\n[foo],\n[bar],\n[baz]\n", "html": "

foo,\nbar,\nbaz

\n", - "example": 180, - "start_line": 3041, - "end_line": 3054, + "example": 186, + "start_line": 3116, + "end_line": 3129, "section": "Link reference definitions" }, { "markdown": "[foo]\n\n> [foo]: /url\n", "html": "

foo

\n
\n
\n", - "example": 181, - "start_line": 3062, - "end_line": 3070, + "example": 187, + "start_line": 3137, + "end_line": 3145, + "section": "Link reference definitions" + }, + { + "markdown": "[foo]: /url\n", + "html": "", + "example": 188, + "start_line": 3154, + "end_line": 3157, "section": "Link reference definitions" }, { "markdown": "aaa\n\nbbb\n", "html": "

aaa

\n

bbb

\n", - "example": 182, - "start_line": 3085, - "end_line": 3092, + "example": 189, + "start_line": 3171, + "end_line": 3178, "section": "Paragraphs" }, { "markdown": "aaa\nbbb\n\nccc\nddd\n", "html": "

aaa\nbbb

\n

ccc\nddd

\n", - "example": 183, - "start_line": 3097, - "end_line": 3108, + "example": 190, + "start_line": 3183, + "end_line": 3194, "section": "Paragraphs" }, { "markdown": "aaa\n\n\nbbb\n", "html": "

aaa

\n

bbb

\n", - "example": 184, - "start_line": 3113, - "end_line": 3121, + "example": 191, + "start_line": 3199, + "end_line": 3207, "section": "Paragraphs" }, { "markdown": " aaa\n bbb\n", "html": "

aaa\nbbb

\n", - "example": 185, - "start_line": 3126, - "end_line": 3132, + "example": 192, + "start_line": 3212, + "end_line": 3218, "section": "Paragraphs" }, { "markdown": "aaa\n bbb\n ccc\n", "html": "

aaa\nbbb\nccc

\n", - "example": 186, - "start_line": 3138, - "end_line": 3146, + "example": 193, + "start_line": 3224, + "end_line": 3232, "section": "Paragraphs" }, { "markdown": " aaa\nbbb\n", "html": "

aaa\nbbb

\n", - "example": 187, - "start_line": 3152, - "end_line": 3158, + "example": 194, + "start_line": 3238, + "end_line": 3244, "section": "Paragraphs" }, { "markdown": " aaa\nbbb\n", "html": "
aaa\n
\n

bbb

\n", - "example": 188, - "start_line": 3161, - "end_line": 3168, + "example": 195, + "start_line": 3247, + "end_line": 3254, "section": "Paragraphs" }, { "markdown": "aaa \nbbb \n", "html": "

aaa
\nbbb

\n", - "example": 189, - "start_line": 3175, - "end_line": 3181, + "example": 196, + "start_line": 3261, + "end_line": 3267, "section": "Paragraphs" }, { "markdown": " \n\naaa\n \n\n# aaa\n\n \n", "html": "

aaa

\n

aaa

\n", - "example": 190, - "start_line": 3192, - "end_line": 3204, + "example": 197, + "start_line": 3278, + "end_line": 3290, "section": "Blank lines" }, { "markdown": "> # Foo\n> bar\n> baz\n", "html": "
\n

Foo

\n

bar\nbaz

\n
\n", - "example": 191, - "start_line": 3258, - "end_line": 3268, + "example": 198, + "start_line": 3344, + "end_line": 3354, "section": "Block quotes" }, { "markdown": "># Foo\n>bar\n> baz\n", "html": "
\n

Foo

\n

bar\nbaz

\n
\n", - "example": 192, - "start_line": 3273, - "end_line": 3283, + "example": 199, + "start_line": 3359, + "end_line": 3369, "section": "Block quotes" }, { "markdown": " > # Foo\n > bar\n > baz\n", "html": "
\n

Foo

\n

bar\nbaz

\n
\n", - "example": 193, - "start_line": 3288, - "end_line": 3298, + "example": 200, + "start_line": 3374, + "end_line": 3384, "section": "Block quotes" }, { "markdown": " > # Foo\n > bar\n > baz\n", "html": "
> # Foo\n> bar\n> baz\n
\n", - "example": 194, - "start_line": 3303, - "end_line": 3312, + "example": 201, + "start_line": 3389, + "end_line": 3398, "section": "Block quotes" }, { "markdown": "> # Foo\n> bar\nbaz\n", "html": "
\n

Foo

\n

bar\nbaz

\n
\n", - "example": 195, - "start_line": 3318, - "end_line": 3328, + "example": 202, + "start_line": 3404, + "end_line": 3414, "section": "Block quotes" }, { "markdown": "> bar\nbaz\n> foo\n", "html": "
\n

bar\nbaz\nfoo

\n
\n", - "example": 196, - "start_line": 3334, - "end_line": 3344, + "example": 203, + "start_line": 3420, + "end_line": 3430, "section": "Block quotes" }, { "markdown": "> foo\n---\n", "html": "
\n

foo

\n
\n
\n", - "example": 197, - "start_line": 3358, - "end_line": 3366, + "example": 204, + "start_line": 3444, + "end_line": 3452, "section": "Block quotes" }, { "markdown": "> - foo\n- bar\n", "html": "
\n
    \n
  • foo
  • \n
\n
\n
    \n
  • bar
  • \n
\n", - "example": 198, - "start_line": 3378, - "end_line": 3390, + "example": 205, + "start_line": 3464, + "end_line": 3476, "section": "Block quotes" }, { "markdown": "> foo\n bar\n", "html": "
\n
foo\n
\n
\n
bar\n
\n", - "example": 199, - "start_line": 3396, - "end_line": 3406, + "example": 206, + "start_line": 3482, + "end_line": 3492, "section": "Block quotes" }, { "markdown": "> ```\nfoo\n```\n", "html": "
\n
\n
\n

foo

\n
\n", - "example": 200, - "start_line": 3409, - "end_line": 3419, + "example": 207, + "start_line": 3495, + "end_line": 3505, "section": "Block quotes" }, { "markdown": "> foo\n - bar\n", "html": "
\n

foo\n- bar

\n
\n", - "example": 201, - "start_line": 3425, - "end_line": 3433, + "example": 208, + "start_line": 3511, + "end_line": 3519, "section": "Block quotes" }, { "markdown": ">\n", "html": "
\n
\n", - "example": 202, - "start_line": 3449, - "end_line": 3454, + "example": 209, + "start_line": 3535, + "end_line": 3540, "section": "Block quotes" }, { "markdown": ">\n> \n> \n", "html": "
\n
\n", - "example": 203, - "start_line": 3457, - "end_line": 3464, + "example": 210, + "start_line": 3543, + "end_line": 3550, "section": "Block quotes" }, { "markdown": ">\n> foo\n> \n", "html": "
\n

foo

\n
\n", - "example": 204, - "start_line": 3469, - "end_line": 3477, + "example": 211, + "start_line": 3555, + "end_line": 3563, "section": "Block quotes" }, { "markdown": "> foo\n\n> bar\n", "html": "
\n

foo

\n
\n
\n

bar

\n
\n", - "example": 205, - "start_line": 3482, - "end_line": 3493, + "example": 212, + "start_line": 3568, + "end_line": 3579, "section": "Block quotes" }, { "markdown": "> foo\n> bar\n", "html": "
\n

foo\nbar

\n
\n", - "example": 206, - "start_line": 3504, - "end_line": 3512, + "example": 213, + "start_line": 3590, + "end_line": 3598, "section": "Block quotes" }, { "markdown": "> foo\n>\n> bar\n", "html": "
\n

foo

\n

bar

\n
\n", - "example": 207, - "start_line": 3517, - "end_line": 3526, + "example": 214, + "start_line": 3603, + "end_line": 3612, "section": "Block quotes" }, { "markdown": "foo\n> bar\n", "html": "

foo

\n
\n

bar

\n
\n", - "example": 208, - "start_line": 3531, - "end_line": 3539, + "example": 215, + "start_line": 3617, + "end_line": 3625, "section": "Block quotes" }, { "markdown": "> aaa\n***\n> bbb\n", "html": "
\n

aaa

\n
\n
\n
\n

bbb

\n
\n", - "example": 209, - "start_line": 3545, - "end_line": 3557, + "example": 216, + "start_line": 3631, + "end_line": 3643, "section": "Block quotes" }, { "markdown": "> bar\nbaz\n", "html": "
\n

bar\nbaz

\n
\n", - "example": 210, - "start_line": 3563, - "end_line": 3571, + "example": 217, + "start_line": 3649, + "end_line": 3657, "section": "Block quotes" }, { "markdown": "> bar\n\nbaz\n", "html": "
\n

bar

\n
\n

baz

\n", - "example": 211, - "start_line": 3574, - "end_line": 3583, + "example": 218, + "start_line": 3660, + "end_line": 3669, "section": "Block quotes" }, { "markdown": "> bar\n>\nbaz\n", "html": "
\n

bar

\n
\n

baz

\n", - "example": 212, - "start_line": 3586, - "end_line": 3595, + "example": 219, + "start_line": 3672, + "end_line": 3681, "section": "Block quotes" }, { "markdown": "> > > foo\nbar\n", "html": "
\n
\n
\n

foo\nbar

\n
\n
\n
\n", - "example": 213, - "start_line": 3602, - "end_line": 3614, + "example": 220, + "start_line": 3688, + "end_line": 3700, "section": "Block quotes" }, { "markdown": ">>> foo\n> bar\n>>baz\n", "html": "
\n
\n
\n

foo\nbar\nbaz

\n
\n
\n
\n", - "example": 214, - "start_line": 3617, - "end_line": 3631, + "example": 221, + "start_line": 3703, + "end_line": 3717, "section": "Block quotes" }, { "markdown": "> code\n\n> not code\n", "html": "
\n
code\n
\n
\n
\n

not code

\n
\n", - "example": 215, - "start_line": 3639, - "end_line": 3651, + "example": 222, + "start_line": 3725, + "end_line": 3737, "section": "Block quotes" }, { "markdown": "A paragraph\nwith two lines.\n\n indented code\n\n> A block quote.\n", "html": "

A paragraph\nwith two lines.

\n
indented code\n
\n
\n

A block quote.

\n
\n", - "example": 216, - "start_line": 3694, - "end_line": 3709, + "example": 223, + "start_line": 3779, + "end_line": 3794, "section": "List items" }, { "markdown": "1. A paragraph\n with two lines.\n\n indented code\n\n > A block quote.\n", "html": "
    \n
  1. \n

    A paragraph\nwith two lines.

    \n
    indented code\n
    \n
    \n

    A block quote.

    \n
    \n
  2. \n
\n", - "example": 217, - "start_line": 3716, - "end_line": 3735, + "example": 224, + "start_line": 3801, + "end_line": 3820, "section": "List items" }, { "markdown": "- one\n\n two\n", "html": "
    \n
  • one
  • \n
\n

two

\n", - "example": 218, - "start_line": 3749, - "end_line": 3758, + "example": 225, + "start_line": 3834, + "end_line": 3843, "section": "List items" }, { "markdown": "- one\n\n two\n", "html": "
    \n
  • \n

    one

    \n

    two

    \n
  • \n
\n", - "example": 219, - "start_line": 3761, - "end_line": 3772, + "example": 226, + "start_line": 3846, + "end_line": 3857, "section": "List items" }, { "markdown": " - one\n\n two\n", "html": "
    \n
  • one
  • \n
\n
 two\n
\n", - "example": 220, - "start_line": 3775, - "end_line": 3785, + "example": 227, + "start_line": 3860, + "end_line": 3870, "section": "List items" }, { "markdown": " - one\n\n two\n", "html": "
    \n
  • \n

    one

    \n

    two

    \n
  • \n
\n", - "example": 221, - "start_line": 3788, - "end_line": 3799, + "example": 228, + "start_line": 3873, + "end_line": 3884, "section": "List items" }, { "markdown": " > > 1. one\n>>\n>> two\n", "html": "
\n
\n
    \n
  1. \n

    one

    \n

    two

    \n
  2. \n
\n
\n
\n", - "example": 222, - "start_line": 3810, - "end_line": 3825, + "example": 229, + "start_line": 3895, + "end_line": 3910, "section": "List items" }, { "markdown": ">>- one\n>>\n > > two\n", "html": "
\n
\n
    \n
  • one
  • \n
\n

two

\n
\n
\n", - "example": 223, - "start_line": 3837, - "end_line": 3850, + "example": 230, + "start_line": 3922, + "end_line": 3935, "section": "List items" }, { "markdown": "-one\n\n2.two\n", "html": "

-one

\n

2.two

\n", - "example": 224, - "start_line": 3856, - "end_line": 3863, + "example": 231, + "start_line": 3941, + "end_line": 3948, "section": "List items" }, { "markdown": "- foo\n\n\n bar\n", "html": "
    \n
  • \n

    foo

    \n

    bar

    \n
  • \n
\n", - "example": 225, - "start_line": 3869, - "end_line": 3881, + "example": 232, + "start_line": 3954, + "end_line": 3966, "section": "List items" }, { "markdown": "1. foo\n\n ```\n bar\n ```\n\n baz\n\n > bam\n", "html": "
    \n
  1. \n

    foo

    \n
    bar\n
    \n

    baz

    \n
    \n

    bam

    \n
    \n
  2. \n
\n", - "example": 226, - "start_line": 3886, - "end_line": 3908, + "example": 233, + "start_line": 3971, + "end_line": 3993, "section": "List items" }, { "markdown": "- Foo\n\n bar\n\n\n baz\n", "html": "
    \n
  • \n

    Foo

    \n
    bar\n\n\nbaz\n
    \n
  • \n
\n", - "example": 227, - "start_line": 3914, - "end_line": 3932, + "example": 234, + "start_line": 3999, + "end_line": 4017, "section": "List items" }, { "markdown": "123456789. ok\n", "html": "
    \n
  1. ok
  2. \n
\n", - "example": 228, - "start_line": 3936, - "end_line": 3942, + "example": 235, + "start_line": 4021, + "end_line": 4027, "section": "List items" }, { "markdown": "1234567890. not ok\n", "html": "

1234567890. not ok

\n", - "example": 229, - "start_line": 3945, - "end_line": 3949, + "example": 236, + "start_line": 4030, + "end_line": 4034, "section": "List items" }, { "markdown": "0. ok\n", "html": "
    \n
  1. ok
  2. \n
\n", - "example": 230, - "start_line": 3954, - "end_line": 3960, + "example": 237, + "start_line": 4039, + "end_line": 4045, "section": "List items" }, { "markdown": "003. ok\n", "html": "
    \n
  1. ok
  2. \n
\n", - "example": 231, - "start_line": 3963, - "end_line": 3969, + "example": 238, + "start_line": 4048, + "end_line": 4054, "section": "List items" }, { "markdown": "-1. not ok\n", "html": "

-1. not ok

\n", - "example": 232, - "start_line": 3974, - "end_line": 3978, + "example": 239, + "start_line": 4059, + "end_line": 4063, "section": "List items" }, { "markdown": "- foo\n\n bar\n", "html": "
    \n
  • \n

    foo

    \n
    bar\n
    \n
  • \n
\n", - "example": 233, - "start_line": 3998, - "end_line": 4010, + "example": 240, + "start_line": 4082, + "end_line": 4094, "section": "List items" }, { "markdown": " 10. foo\n\n bar\n", "html": "
    \n
  1. \n

    foo

    \n
    bar\n
    \n
  2. \n
\n", - "example": 234, - "start_line": 4015, - "end_line": 4027, + "example": 241, + "start_line": 4099, + "end_line": 4111, "section": "List items" }, { "markdown": " indented code\n\nparagraph\n\n more code\n", "html": "
indented code\n
\n

paragraph

\n
more code\n
\n", - "example": 235, - "start_line": 4034, - "end_line": 4046, + "example": 242, + "start_line": 4118, + "end_line": 4130, "section": "List items" }, { "markdown": "1. indented code\n\n paragraph\n\n more code\n", "html": "
    \n
  1. \n
    indented code\n
    \n

    paragraph

    \n
    more code\n
    \n
  2. \n
\n", - "example": 236, - "start_line": 4049, - "end_line": 4065, + "example": 243, + "start_line": 4133, + "end_line": 4149, "section": "List items" }, { "markdown": "1. indented code\n\n paragraph\n\n more code\n", "html": "
    \n
  1. \n
     indented code\n
    \n

    paragraph

    \n
    more code\n
    \n
  2. \n
\n", - "example": 237, - "start_line": 4071, - "end_line": 4087, + "example": 244, + "start_line": 4155, + "end_line": 4171, "section": "List items" }, { "markdown": " foo\n\nbar\n", "html": "

foo

\n

bar

\n", - "example": 238, - "start_line": 4098, - "end_line": 4105, + "example": 245, + "start_line": 4182, + "end_line": 4189, "section": "List items" }, { "markdown": "- foo\n\n bar\n", "html": "
    \n
  • foo
  • \n
\n

bar

\n", - "example": 239, - "start_line": 4108, - "end_line": 4117, + "example": 246, + "start_line": 4192, + "end_line": 4201, "section": "List items" }, { "markdown": "- foo\n\n bar\n", "html": "
    \n
  • \n

    foo

    \n

    bar

    \n
  • \n
\n", - "example": 240, - "start_line": 4125, - "end_line": 4136, + "example": 247, + "start_line": 4209, + "end_line": 4220, "section": "List items" }, { "markdown": "-\n foo\n-\n ```\n bar\n ```\n-\n baz\n", "html": "
    \n
  • foo
  • \n
  • \n
    bar\n
    \n
  • \n
  • \n
    baz\n
    \n
  • \n
\n", - "example": 241, - "start_line": 4153, - "end_line": 4174, + "example": 248, + "start_line": 4237, + "end_line": 4258, "section": "List items" }, { "markdown": "- \n foo\n", "html": "
    \n
  • foo
  • \n
\n", - "example": 242, - "start_line": 4179, - "end_line": 4186, + "example": 249, + "start_line": 4263, + "end_line": 4270, "section": "List items" }, { "markdown": "-\n\n foo\n", "html": "
    \n
  • \n
\n

foo

\n", - "example": 243, - "start_line": 4193, - "end_line": 4202, + "example": 250, + "start_line": 4277, + "end_line": 4286, "section": "List items" }, { "markdown": "- foo\n-\n- bar\n", "html": "
    \n
  • foo
  • \n
  • \n
  • bar
  • \n
\n", - "example": 244, - "start_line": 4207, - "end_line": 4217, + "example": 251, + "start_line": 4291, + "end_line": 4301, "section": "List items" }, { "markdown": "- foo\n- \n- bar\n", "html": "
    \n
  • foo
  • \n
  • \n
  • bar
  • \n
\n", - "example": 245, - "start_line": 4222, - "end_line": 4232, + "example": 252, + "start_line": 4306, + "end_line": 4316, "section": "List items" }, { "markdown": "1. foo\n2.\n3. bar\n", "html": "
    \n
  1. foo
  2. \n
  3. \n
  4. bar
  5. \n
\n", - "example": 246, - "start_line": 4237, - "end_line": 4247, + "example": 253, + "start_line": 4321, + "end_line": 4331, "section": "List items" }, { "markdown": "*\n", "html": "
    \n
  • \n
\n", - "example": 247, - "start_line": 4252, - "end_line": 4258, + "example": 254, + "start_line": 4336, + "end_line": 4342, "section": "List items" }, { "markdown": "foo\n*\n\nfoo\n1.\n", "html": "

foo\n*

\n

foo\n1.

\n", - "example": 248, - "start_line": 4262, - "end_line": 4273, + "example": 255, + "start_line": 4346, + "end_line": 4357, "section": "List items" }, { "markdown": " 1. A paragraph\n with two lines.\n\n indented code\n\n > A block quote.\n", "html": "
    \n
  1. \n

    A paragraph\nwith two lines.

    \n
    indented code\n
    \n
    \n

    A block quote.

    \n
    \n
  2. \n
\n", - "example": 249, - "start_line": 4284, - "end_line": 4303, + "example": 256, + "start_line": 4368, + "end_line": 4387, "section": "List items" }, { "markdown": " 1. A paragraph\n with two lines.\n\n indented code\n\n > A block quote.\n", "html": "
    \n
  1. \n

    A paragraph\nwith two lines.

    \n
    indented code\n
    \n
    \n

    A block quote.

    \n
    \n
  2. \n
\n", - "example": 250, - "start_line": 4308, - "end_line": 4327, + "example": 257, + "start_line": 4392, + "end_line": 4411, "section": "List items" }, { "markdown": " 1. A paragraph\n with two lines.\n\n indented code\n\n > A block quote.\n", "html": "
    \n
  1. \n

    A paragraph\nwith two lines.

    \n
    indented code\n
    \n
    \n

    A block quote.

    \n
    \n
  2. \n
\n", - "example": 251, - "start_line": 4332, - "end_line": 4351, + "example": 258, + "start_line": 4416, + "end_line": 4435, "section": "List items" }, { "markdown": " 1. A paragraph\n with two lines.\n\n indented code\n\n > A block quote.\n", "html": "
1.  A paragraph\n    with two lines.\n\n        indented code\n\n    > A block quote.\n
\n", - "example": 252, - "start_line": 4356, - "end_line": 4371, + "example": 259, + "start_line": 4440, + "end_line": 4455, "section": "List items" }, { "markdown": " 1. A paragraph\nwith two lines.\n\n indented code\n\n > A block quote.\n", "html": "
    \n
  1. \n

    A paragraph\nwith two lines.

    \n
    indented code\n
    \n
    \n

    A block quote.

    \n
    \n
  2. \n
\n", - "example": 253, - "start_line": 4386, - "end_line": 4405, + "example": 260, + "start_line": 4470, + "end_line": 4489, "section": "List items" }, { "markdown": " 1. A paragraph\n with two lines.\n", "html": "
    \n
  1. A paragraph\nwith two lines.
  2. \n
\n", - "example": 254, - "start_line": 4410, - "end_line": 4418, + "example": 261, + "start_line": 4494, + "end_line": 4502, "section": "List items" }, { "markdown": "> 1. > Blockquote\ncontinued here.\n", "html": "
\n
    \n
  1. \n
    \n

    Blockquote\ncontinued here.

    \n
    \n
  2. \n
\n
\n", - "example": 255, - "start_line": 4423, - "end_line": 4437, + "example": 262, + "start_line": 4507, + "end_line": 4521, "section": "List items" }, { "markdown": "> 1. > Blockquote\n> continued here.\n", "html": "
\n
    \n
  1. \n
    \n

    Blockquote\ncontinued here.

    \n
    \n
  2. \n
\n
\n", - "example": 256, - "start_line": 4440, - "end_line": 4454, + "example": 263, + "start_line": 4524, + "end_line": 4538, "section": "List items" }, { "markdown": "- foo\n - bar\n - baz\n - boo\n", "html": "
    \n
  • foo\n
      \n
    • bar\n
        \n
      • baz\n
          \n
        • boo
        • \n
        \n
      • \n
      \n
    • \n
    \n
  • \n
\n", - "example": 257, - "start_line": 4467, - "end_line": 4488, + "example": 264, + "start_line": 4552, + "end_line": 4573, "section": "List items" }, { "markdown": "- foo\n - bar\n - baz\n - boo\n", "html": "
    \n
  • foo
  • \n
  • bar
  • \n
  • baz
  • \n
  • boo
  • \n
\n", - "example": 258, - "start_line": 4493, - "end_line": 4505, + "example": 265, + "start_line": 4578, + "end_line": 4590, "section": "List items" }, { "markdown": "10) foo\n - bar\n", "html": "
    \n
  1. foo\n
      \n
    • bar
    • \n
    \n
  2. \n
\n", - "example": 259, - "start_line": 4510, - "end_line": 4521, + "example": 266, + "start_line": 4595, + "end_line": 4606, "section": "List items" }, { "markdown": "10) foo\n - bar\n", "html": "
    \n
  1. foo
  2. \n
\n
    \n
  • bar
  • \n
\n", - "example": 260, - "start_line": 4526, - "end_line": 4536, + "example": 267, + "start_line": 4611, + "end_line": 4621, "section": "List items" }, { "markdown": "- - foo\n", "html": "
    \n
  • \n
      \n
    • foo
    • \n
    \n
  • \n
\n", - "example": 261, - "start_line": 4541, - "end_line": 4551, + "example": 268, + "start_line": 4626, + "end_line": 4636, "section": "List items" }, { "markdown": "1. - 2. foo\n", "html": "
    \n
  1. \n
      \n
    • \n
        \n
      1. foo
      2. \n
      \n
    • \n
    \n
  2. \n
\n", - "example": 262, - "start_line": 4554, - "end_line": 4568, + "example": 269, + "start_line": 4639, + "end_line": 4653, "section": "List items" }, { "markdown": "- # Foo\n- Bar\n ---\n baz\n", "html": "
    \n
  • \n

    Foo

    \n
  • \n
  • \n

    Bar

    \nbaz
  • \n
\n", - "example": 263, - "start_line": 4573, - "end_line": 4587, + "example": 270, + "start_line": 4658, + "end_line": 4672, "section": "List items" }, { "markdown": "- foo\n- bar\n+ baz\n", "html": "
    \n
  • foo
  • \n
  • bar
  • \n
\n
    \n
  • baz
  • \n
\n", - "example": 264, - "start_line": 4809, - "end_line": 4821, + "example": 271, + "start_line": 4894, + "end_line": 4906, "section": "Lists" }, { "markdown": "1. foo\n2. bar\n3) baz\n", "html": "
    \n
  1. foo
  2. \n
  3. bar
  4. \n
\n
    \n
  1. baz
  2. \n
\n", - "example": 265, - "start_line": 4824, - "end_line": 4836, + "example": 272, + "start_line": 4909, + "end_line": 4921, "section": "Lists" }, { "markdown": "Foo\n- bar\n- baz\n", "html": "

Foo

\n
    \n
  • bar
  • \n
  • baz
  • \n
\n", - "example": 266, - "start_line": 4843, - "end_line": 4853, + "example": 273, + "start_line": 4928, + "end_line": 4938, "section": "Lists" }, { "markdown": "The number of windows in my house is\n14. The number of doors is 6.\n", "html": "

The number of windows in my house is\n14. The number of doors is 6.

\n", - "example": 267, - "start_line": 4920, - "end_line": 4926, + "example": 274, + "start_line": 5005, + "end_line": 5011, "section": "Lists" }, { "markdown": "The number of windows in my house is\n1. The number of doors is 6.\n", "html": "

The number of windows in my house is

\n
    \n
  1. The number of doors is 6.
  2. \n
\n", - "example": 268, - "start_line": 4930, - "end_line": 4938, + "example": 275, + "start_line": 5015, + "end_line": 5023, "section": "Lists" }, { "markdown": "- foo\n\n- bar\n\n\n- baz\n", "html": "
    \n
  • \n

    foo

    \n
  • \n
  • \n

    bar

    \n
  • \n
  • \n

    baz

    \n
  • \n
\n", - "example": 269, - "start_line": 4944, - "end_line": 4963, + "example": 276, + "start_line": 5029, + "end_line": 5048, "section": "Lists" }, { "markdown": "- foo\n - bar\n - baz\n\n\n bim\n", "html": "
    \n
  • foo\n
      \n
    • bar\n
        \n
      • \n

        baz

        \n

        bim

        \n
      • \n
      \n
    • \n
    \n
  • \n
\n", - "example": 270, - "start_line": 4965, - "end_line": 4987, + "example": 277, + "start_line": 5050, + "end_line": 5072, "section": "Lists" }, { "markdown": "- foo\n- bar\n\n\n\n- baz\n- bim\n", "html": "
    \n
  • foo
  • \n
  • bar
  • \n
\n\n
    \n
  • baz
  • \n
  • bim
  • \n
\n", - "example": 271, - "start_line": 4995, - "end_line": 5013, + "example": 278, + "start_line": 5080, + "end_line": 5098, "section": "Lists" }, { "markdown": "- foo\n\n notcode\n\n- foo\n\n\n\n code\n", "html": "
    \n
  • \n

    foo

    \n

    notcode

    \n
  • \n
  • \n

    foo

    \n
  • \n
\n\n
code\n
\n", - "example": 272, - "start_line": 5016, - "end_line": 5039, + "example": 279, + "start_line": 5101, + "end_line": 5124, "section": "Lists" }, { - "markdown": "- a\n - b\n - c\n - d\n - e\n - f\n - g\n - h\n- i\n", - "html": "
    \n
  • a
  • \n
  • b
  • \n
  • c
  • \n
  • d
  • \n
  • e
  • \n
  • f
  • \n
  • g
  • \n
  • h
  • \n
  • i
  • \n
\n", - "example": 273, - "start_line": 5047, - "end_line": 5069, + "markdown": "- a\n - b\n - c\n - d\n - e\n - f\n- g\n", + "html": "
    \n
  • a
  • \n
  • b
  • \n
  • c
  • \n
  • d
  • \n
  • e
  • \n
  • f
  • \n
  • g
  • \n
\n", + "example": 280, + "start_line": 5132, + "end_line": 5150, "section": "Lists" }, { - "markdown": "1. a\n\n 2. b\n\n 3. c\n", + "markdown": "1. a\n\n 2. b\n\n 3. c\n", "html": "
    \n
  1. \n

    a

    \n
  2. \n
  3. \n

    b

    \n
  4. \n
  5. \n

    c

    \n
  6. \n
\n", - "example": 274, - "start_line": 5072, - "end_line": 5090, + "example": 281, + "start_line": 5153, + "end_line": 5171, + "section": "Lists" + }, + { + "markdown": "- a\n - b\n - c\n - d\n - e\n", + "html": "
    \n
  • a
  • \n
  • b
  • \n
  • c
  • \n
  • d\n- e
  • \n
\n", + "example": 282, + "start_line": 5177, + "end_line": 5191, + "section": "Lists" + }, + { + "markdown": "1. a\n\n 2. b\n\n 3. c\n", + "html": "
    \n
  1. \n

    a

    \n
  2. \n
  3. \n

    b

    \n
  4. \n
\n
3. c\n
\n", + "example": 283, + "start_line": 5197, + "end_line": 5214, "section": "Lists" }, { "markdown": "- a\n- b\n\n- c\n", "html": "
    \n
  • \n

    a

    \n
  • \n
  • \n

    b

    \n
  • \n
  • \n

    c

    \n
  • \n
\n", - "example": 275, - "start_line": 5096, - "end_line": 5113, + "example": 284, + "start_line": 5220, + "end_line": 5237, "section": "Lists" }, { "markdown": "* a\n*\n\n* c\n", "html": "
    \n
  • \n

    a

    \n
  • \n
  • \n
  • \n

    c

    \n
  • \n
\n", - "example": 276, - "start_line": 5118, - "end_line": 5133, + "example": 285, + "start_line": 5242, + "end_line": 5257, "section": "Lists" }, { "markdown": "- a\n- b\n\n c\n- d\n", "html": "
    \n
  • \n

    a

    \n
  • \n
  • \n

    b

    \n

    c

    \n
  • \n
  • \n

    d

    \n
  • \n
\n", - "example": 277, - "start_line": 5140, - "end_line": 5159, + "example": 286, + "start_line": 5264, + "end_line": 5283, "section": "Lists" }, { "markdown": "- a\n- b\n\n [ref]: /url\n- d\n", "html": "
    \n
  • \n

    a

    \n
  • \n
  • \n

    b

    \n
  • \n
  • \n

    d

    \n
  • \n
\n", - "example": 278, - "start_line": 5162, - "end_line": 5180, + "example": 287, + "start_line": 5286, + "end_line": 5304, "section": "Lists" }, { "markdown": "- a\n- ```\n b\n\n\n ```\n- c\n", "html": "
    \n
  • a
  • \n
  • \n
    b\n\n\n
    \n
  • \n
  • c
  • \n
\n", - "example": 279, - "start_line": 5185, - "end_line": 5204, + "example": 288, + "start_line": 5309, + "end_line": 5328, "section": "Lists" }, { "markdown": "- a\n - b\n\n c\n- d\n", "html": "
    \n
  • a\n
      \n
    • \n

      b

      \n

      c

      \n
    • \n
    \n
  • \n
  • d
  • \n
\n", - "example": 280, - "start_line": 5211, - "end_line": 5229, + "example": 289, + "start_line": 5335, + "end_line": 5353, "section": "Lists" }, { "markdown": "* a\n > b\n >\n* c\n", "html": "
    \n
  • a\n
    \n

    b

    \n
    \n
  • \n
  • c
  • \n
\n", - "example": 281, - "start_line": 5235, - "end_line": 5249, + "example": 290, + "start_line": 5359, + "end_line": 5373, "section": "Lists" }, { "markdown": "- a\n > b\n ```\n c\n ```\n- d\n", "html": "
    \n
  • a\n
    \n

    b

    \n
    \n
    c\n
    \n
  • \n
  • d
  • \n
\n", - "example": 282, - "start_line": 5255, - "end_line": 5273, + "example": 291, + "start_line": 5379, + "end_line": 5397, "section": "Lists" }, { "markdown": "- a\n", "html": "
    \n
  • a
  • \n
\n", - "example": 283, - "start_line": 5278, - "end_line": 5284, + "example": 292, + "start_line": 5402, + "end_line": 5408, "section": "Lists" }, { "markdown": "- a\n - b\n", "html": "
    \n
  • a\n
      \n
    • b
    • \n
    \n
  • \n
\n", - "example": 284, - "start_line": 5287, - "end_line": 5298, + "example": 293, + "start_line": 5411, + "end_line": 5422, "section": "Lists" }, { "markdown": "1. ```\n foo\n ```\n\n bar\n", "html": "
    \n
  1. \n
    foo\n
    \n

    bar

    \n
  2. \n
\n", - "example": 285, - "start_line": 5304, - "end_line": 5318, + "example": 294, + "start_line": 5428, + "end_line": 5442, "section": "Lists" }, { "markdown": "* foo\n * bar\n\n baz\n", "html": "
    \n
  • \n

    foo

    \n
      \n
    • bar
    • \n
    \n

    baz

    \n
  • \n
\n", - "example": 286, - "start_line": 5323, - "end_line": 5338, + "example": 295, + "start_line": 5447, + "end_line": 5462, "section": "Lists" }, { "markdown": "- a\n - b\n - c\n\n- d\n - e\n - f\n", "html": "
    \n
  • \n

    a

    \n
      \n
    • b
    • \n
    • c
    • \n
    \n
  • \n
  • \n

    d

    \n
      \n
    • e
    • \n
    • f
    • \n
    \n
  • \n
\n", - "example": 287, - "start_line": 5341, - "end_line": 5366, + "example": 296, + "start_line": 5465, + "end_line": 5490, "section": "Lists" }, { "markdown": "`hi`lo`\n", "html": "

hilo`

\n", - "example": 288, - "start_line": 5375, - "end_line": 5379, + "example": 297, + "start_line": 5499, + "end_line": 5503, "section": "Inlines" }, { "markdown": "\\!\\\"\\#\\$\\%\\&\\'\\(\\)\\*\\+\\,\\-\\.\\/\\:\\;\\<\\=\\>\\?\\@\\[\\\\\\]\\^\\_\\`\\{\\|\\}\\~\n", "html": "

!"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~

\n", - "example": 289, - "start_line": 5389, - "end_line": 5393, + "example": 298, + "start_line": 5513, + "end_line": 5517, "section": "Backslash escapes" }, { "markdown": "\\\t\\A\\a\\ \\3\\φ\\«\n", "html": "

\\\t\\A\\a\\ \\3\\φ\\«

\n", - "example": 290, - "start_line": 5399, - "end_line": 5403, + "example": 299, + "start_line": 5523, + "end_line": 5527, "section": "Backslash escapes" }, { - "markdown": "\\*not emphasized*\n\\
not a tag\n\\[not a link](/foo)\n\\`not code`\n1\\. not a list\n\\* not a list\n\\# not a heading\n\\[foo]: /url \"not a reference\"\n", - "html": "

*not emphasized*\n<br/> not a tag\n[not a link](/foo)\n`not code`\n1. not a list\n* not a list\n# not a heading\n[foo]: /url "not a reference"

\n", - "example": 291, - "start_line": 5409, - "end_line": 5427, + "markdown": "\\*not emphasized*\n\\
not a tag\n\\[not a link](/foo)\n\\`not code`\n1\\. not a list\n\\* not a list\n\\# not a heading\n\\[foo]: /url \"not a reference\"\n\\ö not a character entity\n", + "html": "

*not emphasized*\n<br/> not a tag\n[not a link](/foo)\n`not code`\n1. not a list\n* not a list\n# not a heading\n[foo]: /url "not a reference"\n&ouml; not a character entity

\n", + "example": 300, + "start_line": 5533, + "end_line": 5553, "section": "Backslash escapes" }, { "markdown": "\\\\*emphasis*\n", "html": "

\\emphasis

\n", - "example": 292, - "start_line": 5432, - "end_line": 5436, + "example": 301, + "start_line": 5558, + "end_line": 5562, "section": "Backslash escapes" }, { "markdown": "foo\\\nbar\n", "html": "

foo
\nbar

\n", - "example": 293, - "start_line": 5441, - "end_line": 5447, + "example": 302, + "start_line": 5567, + "end_line": 5573, "section": "Backslash escapes" }, { "markdown": "`` \\[\\` ``\n", "html": "

\\[\\`

\n", - "example": 294, - "start_line": 5453, - "end_line": 5457, + "example": 303, + "start_line": 5579, + "end_line": 5583, "section": "Backslash escapes" }, { "markdown": " \\[\\]\n", "html": "
\\[\\]\n
\n", - "example": 295, - "start_line": 5460, - "end_line": 5465, + "example": 304, + "start_line": 5586, + "end_line": 5591, "section": "Backslash escapes" }, { "markdown": "~~~\n\\[\\]\n~~~\n", "html": "
\\[\\]\n
\n", - "example": 296, - "start_line": 5468, - "end_line": 5475, + "example": 305, + "start_line": 5594, + "end_line": 5601, "section": "Backslash escapes" }, { "markdown": "\n", "html": "

http://example.com?find=\\*

\n", - "example": 297, - "start_line": 5478, - "end_line": 5482, + "example": 306, + "start_line": 5604, + "end_line": 5608, "section": "Backslash escapes" }, { "markdown": "\n", "html": "\n", - "example": 298, - "start_line": 5485, - "end_line": 5489, + "example": 307, + "start_line": 5611, + "end_line": 5615, "section": "Backslash escapes" }, { "markdown": "[foo](/bar\\* \"ti\\*tle\")\n", "html": "

foo

\n", - "example": 299, - "start_line": 5495, - "end_line": 5499, + "example": 308, + "start_line": 5621, + "end_line": 5625, "section": "Backslash escapes" }, { "markdown": "[foo]\n\n[foo]: /bar\\* \"ti\\*tle\"\n", "html": "

foo

\n", - "example": 300, - "start_line": 5502, - "end_line": 5508, + "example": 309, + "start_line": 5628, + "end_line": 5634, "section": "Backslash escapes" }, { "markdown": "``` foo\\+bar\nfoo\n```\n", "html": "
foo\n
\n", - "example": 301, - "start_line": 5511, - "end_line": 5518, + "example": 310, + "start_line": 5637, + "end_line": 5644, "section": "Backslash escapes" }, { "markdown": "  & © Æ Ď\n¾ ℋ ⅆ\n∲ ≧̸\n", "html": "

  & © Æ Ď\n¾ ℋ ⅆ\n∲ ≧̸

\n", - "example": 302, - "start_line": 5538, - "end_line": 5546, + "example": 311, + "start_line": 5674, + "end_line": 5682, "section": "Entity and numeric character references" }, { - "markdown": "# Ӓ Ϡ � �\n", - "html": "

# Ӓ Ϡ � �

\n", - "example": 303, - "start_line": 5557, - "end_line": 5561, + "markdown": "# Ӓ Ϡ �\n", + "html": "

# Ӓ Ϡ �

\n", + "example": 312, + "start_line": 5693, + "end_line": 5697, "section": "Entity and numeric character references" }, { "markdown": "" ആ ಫ\n", "html": "

" ആ ಫ

\n", - "example": 304, - "start_line": 5570, - "end_line": 5574, + "example": 313, + "start_line": 5706, + "end_line": 5710, "section": "Entity and numeric character references" }, { - "markdown": "  &x; &#; &#x;\n&ThisIsNotDefined; &hi?;\n", - "html": "

&nbsp &x; &#; &#x;\n&ThisIsNotDefined; &hi?;

\n", - "example": 305, - "start_line": 5579, - "end_line": 5585, + "markdown": "  &x; &#; &#x;\n�\n&#abcdef0;\n&ThisIsNotDefined; &hi?;\n", + "html": "

&nbsp &x; &#; &#x;\n&#987654321;\n&#abcdef0;\n&ThisIsNotDefined; &hi?;

\n", + "example": 314, + "start_line": 5715, + "end_line": 5725, "section": "Entity and numeric character references" }, { "markdown": "©\n", "html": "

&copy

\n", - "example": 306, - "start_line": 5592, - "end_line": 5596, + "example": 315, + "start_line": 5732, + "end_line": 5736, "section": "Entity and numeric character references" }, { "markdown": "&MadeUpEntity;\n", "html": "

&MadeUpEntity;

\n", - "example": 307, - "start_line": 5602, - "end_line": 5606, + "example": 316, + "start_line": 5742, + "end_line": 5746, "section": "Entity and numeric character references" }, { "markdown": "\n", "html": "\n", - "example": 308, - "start_line": 5613, - "end_line": 5617, + "example": 317, + "start_line": 5753, + "end_line": 5757, "section": "Entity and numeric character references" }, { "markdown": "[foo](/föö \"föö\")\n", "html": "

foo

\n", - "example": 309, - "start_line": 5620, - "end_line": 5624, + "example": 318, + "start_line": 5760, + "end_line": 5764, "section": "Entity and numeric character references" }, { "markdown": "[foo]\n\n[foo]: /föö \"föö\"\n", "html": "

foo

\n", - "example": 310, - "start_line": 5627, - "end_line": 5633, + "example": 319, + "start_line": 5767, + "end_line": 5773, "section": "Entity and numeric character references" }, { "markdown": "``` föö\nfoo\n```\n", "html": "
foo\n
\n", - "example": 311, - "start_line": 5636, - "end_line": 5643, + "example": 320, + "start_line": 5776, + "end_line": 5783, "section": "Entity and numeric character references" }, { "markdown": "`föö`\n", "html": "

f&ouml;&ouml;

\n", - "example": 312, - "start_line": 5649, - "end_line": 5653, + "example": 321, + "start_line": 5789, + "end_line": 5793, "section": "Entity and numeric character references" }, { "markdown": " föfö\n", "html": "
f&ouml;f&ouml;\n
\n", - "example": 313, - "start_line": 5656, - "end_line": 5661, + "example": 322, + "start_line": 5796, + "end_line": 5801, + "section": "Entity and numeric character references" + }, + { + "markdown": "*foo*\n*foo*\n", + "html": "

*foo*\nfoo

\n", + "example": 323, + "start_line": 5808, + "end_line": 5814, + "section": "Entity and numeric character references" + }, + { + "markdown": "* foo\n\n* foo\n", + "html": "

* foo

\n
    \n
  • foo
  • \n
\n", + "example": 324, + "start_line": 5816, + "end_line": 5825, + "section": "Entity and numeric character references" + }, + { + "markdown": "foo bar\n", + "html": "

foo\n\nbar

\n", + "example": 325, + "start_line": 5827, + "end_line": 5833, + "section": "Entity and numeric character references" + }, + { + "markdown": " foo\n", + "html": "

\tfoo

\n", + "example": 326, + "start_line": 5835, + "end_line": 5839, + "section": "Entity and numeric character references" + }, + { + "markdown": "[a](url "tit")\n", + "html": "

[a](url "tit")

\n", + "example": 327, + "start_line": 5842, + "end_line": 5846, "section": "Entity and numeric character references" }, { "markdown": "`foo`\n", "html": "

foo

\n", - "example": 314, - "start_line": 5678, - "end_line": 5682, + "example": 328, + "start_line": 5870, + "end_line": 5874, "section": "Code spans" }, { - "markdown": "`` foo ` bar ``\n", + "markdown": "`` foo ` bar ``\n", "html": "

foo ` bar

\n", - "example": 315, - "start_line": 5688, - "end_line": 5692, + "example": 329, + "start_line": 5881, + "end_line": 5885, "section": "Code spans" }, { "markdown": "` `` `\n", "html": "

``

\n", - "example": 316, - "start_line": 5698, - "end_line": 5702, + "example": 330, + "start_line": 5891, + "end_line": 5895, "section": "Code spans" }, { - "markdown": "``\nfoo\n``\n", - "html": "

foo

\n", - "example": 317, - "start_line": 5707, - "end_line": 5713, + "markdown": "` `` `\n", + "html": "

``

\n", + "example": 331, + "start_line": 5899, + "end_line": 5903, "section": "Code spans" }, { - "markdown": "`foo bar\n baz`\n", - "html": "

foo bar baz

\n", - "example": 318, - "start_line": 5719, - "end_line": 5724, + "markdown": "` a`\n", + "html": "

a

\n", + "example": 332, + "start_line": 5908, + "end_line": 5912, "section": "Code spans" }, { - "markdown": "`a  b`\n", - "html": "

a  b

\n", - "example": 319, - "start_line": 5730, - "end_line": 5734, + "markdown": "` b `\n", + "html": "

 b 

\n", + "example": 333, + "start_line": 5917, + "end_line": 5921, "section": "Code spans" }, { - "markdown": "`foo `` bar`\n", - "html": "

foo `` bar

\n", - "example": 320, - "start_line": 5750, - "end_line": 5754, + "markdown": "` `\n` `\n", + "html": "

 \n

\n", + "example": 334, + "start_line": 5925, + "end_line": 5931, + "section": "Code spans" + }, + { + "markdown": "``\nfoo\nbar \nbaz\n``\n", + "html": "

foo bar baz

\n", + "example": 335, + "start_line": 5936, + "end_line": 5944, + "section": "Code spans" + }, + { + "markdown": "``\nfoo \n``\n", + "html": "

foo

\n", + "example": 336, + "start_line": 5946, + "end_line": 5952, + "section": "Code spans" + }, + { + "markdown": "`foo bar \nbaz`\n", + "html": "

foo bar baz

\n", + "example": 337, + "start_line": 5957, + "end_line": 5962, "section": "Code spans" }, { "markdown": "`foo\\`bar`\n", "html": "

foo\\bar`

\n", - "example": 321, - "start_line": 5760, - "end_line": 5764, + "example": 338, + "start_line": 5974, + "end_line": 5978, + "section": "Code spans" + }, + { + "markdown": "``foo`bar``\n", + "html": "

foo`bar

\n", + "example": 339, + "start_line": 5985, + "end_line": 5989, + "section": "Code spans" + }, + { + "markdown": "` foo `` bar `\n", + "html": "

foo `` bar

\n", + "example": 340, + "start_line": 5991, + "end_line": 5995, "section": "Code spans" }, { "markdown": "*foo`*`\n", "html": "

*foo*

\n", - "example": 322, - "start_line": 5776, - "end_line": 5780, + "example": 341, + "start_line": 6003, + "end_line": 6007, "section": "Code spans" }, { "markdown": "[not a `link](/foo`)\n", "html": "

[not a link](/foo)

\n", - "example": 323, - "start_line": 5785, - "end_line": 5789, + "example": 342, + "start_line": 6012, + "end_line": 6016, "section": "Code spans" }, { "markdown": "``\n", "html": "

<a href="">`

\n", - "example": 324, - "start_line": 5795, - "end_line": 5799, + "example": 343, + "start_line": 6022, + "end_line": 6026, "section": "Code spans" }, { "markdown": "
`\n", "html": "

`

\n", - "example": 325, - "start_line": 5804, - "end_line": 5808, + "example": 344, + "start_line": 6031, + "end_line": 6035, "section": "Code spans" }, { "markdown": "``\n", "html": "

<http://foo.bar.baz>`

\n", - "example": 326, - "start_line": 5813, - "end_line": 5817, + "example": 345, + "start_line": 6040, + "end_line": 6044, "section": "Code spans" }, { "markdown": "`\n", "html": "

http://foo.bar.`baz`

\n", - "example": 327, - "start_line": 5822, - "end_line": 5826, + "example": 346, + "start_line": 6049, + "end_line": 6053, "section": "Code spans" }, { "markdown": "```foo``\n", "html": "

```foo``

\n", - "example": 328, - "start_line": 5832, - "end_line": 5836, + "example": 347, + "start_line": 6059, + "end_line": 6063, "section": "Code spans" }, { "markdown": "`foo\n", "html": "

`foo

\n", - "example": 329, - "start_line": 5839, - "end_line": 5843, + "example": 348, + "start_line": 6066, + "end_line": 6070, "section": "Code spans" }, { "markdown": "`foo``bar``\n", "html": "

`foobar

\n", - "example": 330, - "start_line": 5848, - "end_line": 5852, + "example": 349, + "start_line": 6075, + "end_line": 6079, "section": "Code spans" }, { "markdown": "*foo bar*\n", "html": "

foo bar

\n", - "example": 331, - "start_line": 6061, - "end_line": 6065, + "example": 350, + "start_line": 6292, + "end_line": 6296, "section": "Emphasis and strong emphasis" }, { "markdown": "a * foo bar*\n", "html": "

a * foo bar*

\n", - "example": 332, - "start_line": 6071, - "end_line": 6075, + "example": 351, + "start_line": 6302, + "end_line": 6306, "section": "Emphasis and strong emphasis" }, { "markdown": "a*\"foo\"*\n", "html": "

a*"foo"*

\n", - "example": 333, - "start_line": 6082, - "end_line": 6086, + "example": 352, + "start_line": 6313, + "end_line": 6317, "section": "Emphasis and strong emphasis" }, { "markdown": "* a *\n", "html": "

* a *

\n", - "example": 334, - "start_line": 6091, - "end_line": 6095, + "example": 353, + "start_line": 6322, + "end_line": 6326, "section": "Emphasis and strong emphasis" }, { "markdown": "foo*bar*\n", "html": "

foobar

\n", - "example": 335, - "start_line": 6100, - "end_line": 6104, + "example": 354, + "start_line": 6331, + "end_line": 6335, "section": "Emphasis and strong emphasis" }, { "markdown": "5*6*78\n", "html": "

5678

\n", - "example": 336, - "start_line": 6107, - "end_line": 6111, + "example": 355, + "start_line": 6338, + "end_line": 6342, "section": "Emphasis and strong emphasis" }, { "markdown": "_foo bar_\n", "html": "

foo bar

\n", - "example": 337, - "start_line": 6116, - "end_line": 6120, + "example": 356, + "start_line": 6347, + "end_line": 6351, "section": "Emphasis and strong emphasis" }, { "markdown": "_ foo bar_\n", "html": "

_ foo bar_

\n", - "example": 338, - "start_line": 6126, - "end_line": 6130, + "example": 357, + "start_line": 6357, + "end_line": 6361, "section": "Emphasis and strong emphasis" }, { "markdown": "a_\"foo\"_\n", "html": "

a_"foo"_

\n", - "example": 339, - "start_line": 6136, - "end_line": 6140, + "example": 358, + "start_line": 6367, + "end_line": 6371, "section": "Emphasis and strong emphasis" }, { "markdown": "foo_bar_\n", "html": "

foo_bar_

\n", - "example": 340, - "start_line": 6145, - "end_line": 6149, + "example": 359, + "start_line": 6376, + "end_line": 6380, "section": "Emphasis and strong emphasis" }, { "markdown": "5_6_78\n", "html": "

5_6_78

\n", - "example": 341, - "start_line": 6152, - "end_line": 6156, + "example": 360, + "start_line": 6383, + "end_line": 6387, "section": "Emphasis and strong emphasis" }, { "markdown": "пристаням_стремятся_\n", "html": "

пристаням_стремятся_

\n", - "example": 342, - "start_line": 6159, - "end_line": 6163, + "example": 361, + "start_line": 6390, + "end_line": 6394, "section": "Emphasis and strong emphasis" }, { "markdown": "aa_\"bb\"_cc\n", "html": "

aa_"bb"_cc

\n", - "example": 343, - "start_line": 6169, - "end_line": 6173, + "example": 362, + "start_line": 6400, + "end_line": 6404, "section": "Emphasis and strong emphasis" }, { "markdown": "foo-_(bar)_\n", "html": "

foo-(bar)

\n", - "example": 344, - "start_line": 6180, - "end_line": 6184, + "example": 363, + "start_line": 6411, + "end_line": 6415, "section": "Emphasis and strong emphasis" }, { "markdown": "_foo*\n", "html": "

_foo*

\n", - "example": 345, - "start_line": 6192, - "end_line": 6196, + "example": 364, + "start_line": 6423, + "end_line": 6427, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo bar *\n", "html": "

*foo bar *

\n", - "example": 346, - "start_line": 6202, - "end_line": 6206, + "example": 365, + "start_line": 6433, + "end_line": 6437, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo bar\n*\n", "html": "

*foo bar\n*

\n", - "example": 347, - "start_line": 6211, - "end_line": 6217, + "example": 366, + "start_line": 6442, + "end_line": 6448, "section": "Emphasis and strong emphasis" }, { "markdown": "*(*foo)\n", "html": "

*(*foo)

\n", - "example": 348, - "start_line": 6224, - "end_line": 6228, + "example": 367, + "start_line": 6455, + "end_line": 6459, "section": "Emphasis and strong emphasis" }, { "markdown": "*(*foo*)*\n", "html": "

(foo)

\n", - "example": 349, - "start_line": 6234, - "end_line": 6238, + "example": 368, + "start_line": 6465, + "end_line": 6469, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo*bar\n", "html": "

foobar

\n", - "example": 350, - "start_line": 6243, - "end_line": 6247, + "example": 369, + "start_line": 6474, + "end_line": 6478, "section": "Emphasis and strong emphasis" }, { "markdown": "_foo bar _\n", "html": "

_foo bar _

\n", - "example": 351, - "start_line": 6256, - "end_line": 6260, + "example": 370, + "start_line": 6487, + "end_line": 6491, "section": "Emphasis and strong emphasis" }, { "markdown": "_(_foo)\n", "html": "

_(_foo)

\n", - "example": 352, - "start_line": 6266, - "end_line": 6270, + "example": 371, + "start_line": 6497, + "end_line": 6501, "section": "Emphasis and strong emphasis" }, { "markdown": "_(_foo_)_\n", "html": "

(foo)

\n", - "example": 353, - "start_line": 6275, - "end_line": 6279, + "example": 372, + "start_line": 6506, + "end_line": 6510, "section": "Emphasis and strong emphasis" }, { "markdown": "_foo_bar\n", "html": "

_foo_bar

\n", - "example": 354, - "start_line": 6284, - "end_line": 6288, + "example": 373, + "start_line": 6515, + "end_line": 6519, "section": "Emphasis and strong emphasis" }, { "markdown": "_пристаням_стремятся\n", "html": "

_пристаням_стремятся

\n", - "example": 355, - "start_line": 6291, - "end_line": 6295, + "example": 374, + "start_line": 6522, + "end_line": 6526, "section": "Emphasis and strong emphasis" }, { "markdown": "_foo_bar_baz_\n", "html": "

foo_bar_baz

\n", - "example": 356, - "start_line": 6298, - "end_line": 6302, + "example": 375, + "start_line": 6529, + "end_line": 6533, "section": "Emphasis and strong emphasis" }, { "markdown": "_(bar)_.\n", "html": "

(bar).

\n", - "example": 357, - "start_line": 6309, - "end_line": 6313, + "example": 376, + "start_line": 6540, + "end_line": 6544, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo bar**\n", "html": "

foo bar

\n", - "example": 358, - "start_line": 6318, - "end_line": 6322, + "example": 377, + "start_line": 6549, + "end_line": 6553, "section": "Emphasis and strong emphasis" }, { "markdown": "** foo bar**\n", "html": "

** foo bar**

\n", - "example": 359, - "start_line": 6328, - "end_line": 6332, + "example": 378, + "start_line": 6559, + "end_line": 6563, "section": "Emphasis and strong emphasis" }, { "markdown": "a**\"foo\"**\n", "html": "

a**"foo"**

\n", - "example": 360, - "start_line": 6339, - "end_line": 6343, + "example": 379, + "start_line": 6570, + "end_line": 6574, "section": "Emphasis and strong emphasis" }, { "markdown": "foo**bar**\n", "html": "

foobar

\n", - "example": 361, - "start_line": 6348, - "end_line": 6352, + "example": 380, + "start_line": 6579, + "end_line": 6583, "section": "Emphasis and strong emphasis" }, { "markdown": "__foo bar__\n", "html": "

foo bar

\n", - "example": 362, - "start_line": 6357, - "end_line": 6361, + "example": 381, + "start_line": 6588, + "end_line": 6592, "section": "Emphasis and strong emphasis" }, { "markdown": "__ foo bar__\n", "html": "

__ foo bar__

\n", - "example": 363, - "start_line": 6367, - "end_line": 6371, + "example": 382, + "start_line": 6598, + "end_line": 6602, "section": "Emphasis and strong emphasis" }, { "markdown": "__\nfoo bar__\n", "html": "

__\nfoo bar__

\n", - "example": 364, - "start_line": 6375, - "end_line": 6381, + "example": 383, + "start_line": 6606, + "end_line": 6612, "section": "Emphasis and strong emphasis" }, { "markdown": "a__\"foo\"__\n", "html": "

a__"foo"__

\n", - "example": 365, - "start_line": 6387, - "end_line": 6391, + "example": 384, + "start_line": 6618, + "end_line": 6622, "section": "Emphasis and strong emphasis" }, { "markdown": "foo__bar__\n", "html": "

foo__bar__

\n", - "example": 366, - "start_line": 6396, - "end_line": 6400, + "example": 385, + "start_line": 6627, + "end_line": 6631, "section": "Emphasis and strong emphasis" }, { "markdown": "5__6__78\n", "html": "

5__6__78

\n", - "example": 367, - "start_line": 6403, - "end_line": 6407, + "example": 386, + "start_line": 6634, + "end_line": 6638, "section": "Emphasis and strong emphasis" }, { "markdown": "пристаням__стремятся__\n", "html": "

пристаням__стремятся__

\n", - "example": 368, - "start_line": 6410, - "end_line": 6414, + "example": 387, + "start_line": 6641, + "end_line": 6645, "section": "Emphasis and strong emphasis" }, { "markdown": "__foo, __bar__, baz__\n", "html": "

foo, bar, baz

\n", - "example": 369, - "start_line": 6417, - "end_line": 6421, + "example": 388, + "start_line": 6648, + "end_line": 6652, "section": "Emphasis and strong emphasis" }, { "markdown": "foo-__(bar)__\n", "html": "

foo-(bar)

\n", - "example": 370, - "start_line": 6428, - "end_line": 6432, + "example": 389, + "start_line": 6659, + "end_line": 6663, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo bar **\n", "html": "

**foo bar **

\n", - "example": 371, - "start_line": 6441, - "end_line": 6445, + "example": 390, + "start_line": 6672, + "end_line": 6676, "section": "Emphasis and strong emphasis" }, { "markdown": "**(**foo)\n", "html": "

**(**foo)

\n", - "example": 372, - "start_line": 6454, - "end_line": 6458, + "example": 391, + "start_line": 6685, + "end_line": 6689, "section": "Emphasis and strong emphasis" }, { "markdown": "*(**foo**)*\n", "html": "

(foo)

\n", - "example": 373, - "start_line": 6464, - "end_line": 6468, + "example": 392, + "start_line": 6695, + "end_line": 6699, "section": "Emphasis and strong emphasis" }, { "markdown": "**Gomphocarpus (*Gomphocarpus physocarpus*, syn.\n*Asclepias physocarpa*)**\n", "html": "

Gomphocarpus (Gomphocarpus physocarpus, syn.\nAsclepias physocarpa)

\n", - "example": 374, - "start_line": 6471, - "end_line": 6477, + "example": 393, + "start_line": 6702, + "end_line": 6708, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo \"*bar*\" foo**\n", "html": "

foo "bar" foo

\n", - "example": 375, - "start_line": 6480, - "end_line": 6484, + "example": 394, + "start_line": 6711, + "end_line": 6715, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo**bar\n", "html": "

foobar

\n", - "example": 376, - "start_line": 6489, - "end_line": 6493, + "example": 395, + "start_line": 6720, + "end_line": 6724, "section": "Emphasis and strong emphasis" }, { "markdown": "__foo bar __\n", "html": "

__foo bar __

\n", - "example": 377, - "start_line": 6501, - "end_line": 6505, + "example": 396, + "start_line": 6732, + "end_line": 6736, "section": "Emphasis and strong emphasis" }, { "markdown": "__(__foo)\n", "html": "

__(__foo)

\n", - "example": 378, - "start_line": 6511, - "end_line": 6515, + "example": 397, + "start_line": 6742, + "end_line": 6746, "section": "Emphasis and strong emphasis" }, { "markdown": "_(__foo__)_\n", "html": "

(foo)

\n", - "example": 379, - "start_line": 6521, - "end_line": 6525, + "example": 398, + "start_line": 6752, + "end_line": 6756, "section": "Emphasis and strong emphasis" }, { "markdown": "__foo__bar\n", "html": "

__foo__bar

\n", - "example": 380, - "start_line": 6530, - "end_line": 6534, + "example": 399, + "start_line": 6761, + "end_line": 6765, "section": "Emphasis and strong emphasis" }, { "markdown": "__пристаням__стремятся\n", "html": "

__пристаням__стремятся

\n", - "example": 381, - "start_line": 6537, - "end_line": 6541, + "example": 400, + "start_line": 6768, + "end_line": 6772, "section": "Emphasis and strong emphasis" }, { "markdown": "__foo__bar__baz__\n", "html": "

foo__bar__baz

\n", - "example": 382, - "start_line": 6544, - "end_line": 6548, + "example": 401, + "start_line": 6775, + "end_line": 6779, "section": "Emphasis and strong emphasis" }, { "markdown": "__(bar)__.\n", "html": "

(bar).

\n", - "example": 383, - "start_line": 6555, - "end_line": 6559, + "example": 402, + "start_line": 6786, + "end_line": 6790, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo [bar](/url)*\n", "html": "

foo bar

\n", - "example": 384, - "start_line": 6567, - "end_line": 6571, + "example": 403, + "start_line": 6798, + "end_line": 6802, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo\nbar*\n", "html": "

foo\nbar

\n", - "example": 385, - "start_line": 6574, - "end_line": 6580, + "example": 404, + "start_line": 6805, + "end_line": 6811, "section": "Emphasis and strong emphasis" }, { "markdown": "_foo __bar__ baz_\n", "html": "

foo bar baz

\n", - "example": 386, - "start_line": 6586, - "end_line": 6590, + "example": 405, + "start_line": 6817, + "end_line": 6821, "section": "Emphasis and strong emphasis" }, { "markdown": "_foo _bar_ baz_\n", "html": "

foo bar baz

\n", - "example": 387, - "start_line": 6593, - "end_line": 6597, + "example": 406, + "start_line": 6824, + "end_line": 6828, "section": "Emphasis and strong emphasis" }, { "markdown": "__foo_ bar_\n", "html": "

foo bar

\n", - "example": 388, - "start_line": 6600, - "end_line": 6604, + "example": 407, + "start_line": 6831, + "end_line": 6835, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo *bar**\n", "html": "

foo bar

\n", - "example": 389, - "start_line": 6607, - "end_line": 6611, + "example": 408, + "start_line": 6838, + "end_line": 6842, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo **bar** baz*\n", "html": "

foo bar baz

\n", - "example": 390, - "start_line": 6614, - "end_line": 6618, + "example": 409, + "start_line": 6845, + "end_line": 6849, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo**bar**baz*\n", "html": "

foobarbaz

\n", - "example": 391, - "start_line": 6620, - "end_line": 6624, + "example": 410, + "start_line": 6851, + "end_line": 6855, + "section": "Emphasis and strong emphasis" + }, + { + "markdown": "*foo**bar*\n", + "html": "

foo**bar

\n", + "example": 411, + "start_line": 6875, + "end_line": 6879, "section": "Emphasis and strong emphasis" }, { "markdown": "***foo** bar*\n", "html": "

foo bar

\n", - "example": 392, - "start_line": 6645, - "end_line": 6649, + "example": 412, + "start_line": 6888, + "end_line": 6892, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo **bar***\n", "html": "

foo bar

\n", - "example": 393, - "start_line": 6652, - "end_line": 6656, + "example": 413, + "start_line": 6895, + "end_line": 6899, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo**bar***\n", "html": "

foobar

\n", - "example": 394, - "start_line": 6659, - "end_line": 6663, + "example": 414, + "start_line": 6902, + "end_line": 6906, + "section": "Emphasis and strong emphasis" + }, + { + "markdown": "foo***bar***baz\n", + "html": "

foobarbaz

\n", + "example": 415, + "start_line": 6913, + "end_line": 6917, + "section": "Emphasis and strong emphasis" + }, + { + "markdown": "foo******bar*********baz\n", + "html": "

foobar***baz

\n", + "example": 416, + "start_line": 6919, + "end_line": 6923, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo **bar *baz* bim** bop*\n", "html": "

foo bar baz bim bop

\n", - "example": 395, - "start_line": 6668, - "end_line": 6672, + "example": 417, + "start_line": 6928, + "end_line": 6932, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo [*bar*](/url)*\n", "html": "

foo bar

\n", - "example": 396, - "start_line": 6675, - "end_line": 6679, + "example": 418, + "start_line": 6935, + "end_line": 6939, "section": "Emphasis and strong emphasis" }, { "markdown": "** is not an empty emphasis\n", "html": "

** is not an empty emphasis

\n", - "example": 397, - "start_line": 6684, - "end_line": 6688, + "example": 419, + "start_line": 6944, + "end_line": 6948, "section": "Emphasis and strong emphasis" }, { "markdown": "**** is not an empty strong emphasis\n", "html": "

**** is not an empty strong emphasis

\n", - "example": 398, - "start_line": 6691, - "end_line": 6695, + "example": 420, + "start_line": 6951, + "end_line": 6955, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo [bar](/url)**\n", "html": "

foo bar

\n", - "example": 399, - "start_line": 6704, - "end_line": 6708, + "example": 421, + "start_line": 6964, + "end_line": 6968, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo\nbar**\n", "html": "

foo\nbar

\n", - "example": 400, - "start_line": 6711, - "end_line": 6717, + "example": 422, + "start_line": 6971, + "end_line": 6977, "section": "Emphasis and strong emphasis" }, { "markdown": "__foo _bar_ baz__\n", "html": "

foo bar baz

\n", - "example": 401, - "start_line": 6723, - "end_line": 6727, + "example": 423, + "start_line": 6983, + "end_line": 6987, "section": "Emphasis and strong emphasis" }, { "markdown": "__foo __bar__ baz__\n", "html": "

foo bar baz

\n", - "example": 402, - "start_line": 6730, - "end_line": 6734, + "example": 424, + "start_line": 6990, + "end_line": 6994, "section": "Emphasis and strong emphasis" }, { "markdown": "____foo__ bar__\n", "html": "

foo bar

\n", - "example": 403, - "start_line": 6737, - "end_line": 6741, + "example": 425, + "start_line": 6997, + "end_line": 7001, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo **bar****\n", "html": "

foo bar

\n", - "example": 404, - "start_line": 6744, - "end_line": 6748, + "example": 426, + "start_line": 7004, + "end_line": 7008, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo *bar* baz**\n", "html": "

foo bar baz

\n", - "example": 405, - "start_line": 6751, - "end_line": 6755, + "example": 427, + "start_line": 7011, + "end_line": 7015, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo*bar*baz**\n", "html": "

foobarbaz

\n", - "example": 406, - "start_line": 6758, - "end_line": 6762, + "example": 428, + "start_line": 7018, + "end_line": 7022, "section": "Emphasis and strong emphasis" }, { "markdown": "***foo* bar**\n", "html": "

foo bar

\n", - "example": 407, - "start_line": 6765, - "end_line": 6769, + "example": 429, + "start_line": 7025, + "end_line": 7029, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo *bar***\n", "html": "

foo bar

\n", - "example": 408, - "start_line": 6772, - "end_line": 6776, + "example": 430, + "start_line": 7032, + "end_line": 7036, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo *bar **baz**\nbim* bop**\n", "html": "

foo bar baz\nbim bop

\n", - "example": 409, - "start_line": 6781, - "end_line": 6787, + "example": 431, + "start_line": 7041, + "end_line": 7047, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo [*bar*](/url)**\n", "html": "

foo bar

\n", - "example": 410, - "start_line": 6790, - "end_line": 6794, + "example": 432, + "start_line": 7050, + "end_line": 7054, "section": "Emphasis and strong emphasis" }, { "markdown": "__ is not an empty emphasis\n", "html": "

__ is not an empty emphasis

\n", - "example": 411, - "start_line": 6799, - "end_line": 6803, + "example": 433, + "start_line": 7059, + "end_line": 7063, "section": "Emphasis and strong emphasis" }, { "markdown": "____ is not an empty strong emphasis\n", "html": "

____ is not an empty strong emphasis

\n", - "example": 412, - "start_line": 6806, - "end_line": 6810, + "example": 434, + "start_line": 7066, + "end_line": 7070, "section": "Emphasis and strong emphasis" }, { "markdown": "foo ***\n", "html": "

foo ***

\n", - "example": 413, - "start_line": 6816, - "end_line": 6820, + "example": 435, + "start_line": 7076, + "end_line": 7080, "section": "Emphasis and strong emphasis" }, { "markdown": "foo *\\**\n", "html": "

foo *

\n", - "example": 414, - "start_line": 6823, - "end_line": 6827, + "example": 436, + "start_line": 7083, + "end_line": 7087, "section": "Emphasis and strong emphasis" }, { "markdown": "foo *_*\n", "html": "

foo _

\n", - "example": 415, - "start_line": 6830, - "end_line": 6834, + "example": 437, + "start_line": 7090, + "end_line": 7094, "section": "Emphasis and strong emphasis" }, { "markdown": "foo *****\n", "html": "

foo *****

\n", - "example": 416, - "start_line": 6837, - "end_line": 6841, + "example": 438, + "start_line": 7097, + "end_line": 7101, "section": "Emphasis and strong emphasis" }, { "markdown": "foo **\\***\n", "html": "

foo *

\n", - "example": 417, - "start_line": 6844, - "end_line": 6848, + "example": 439, + "start_line": 7104, + "end_line": 7108, "section": "Emphasis and strong emphasis" }, { "markdown": "foo **_**\n", "html": "

foo _

\n", - "example": 418, - "start_line": 6851, - "end_line": 6855, + "example": 440, + "start_line": 7111, + "end_line": 7115, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo*\n", "html": "

*foo

\n", - "example": 419, - "start_line": 6862, - "end_line": 6866, + "example": 441, + "start_line": 7122, + "end_line": 7126, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo**\n", "html": "

foo*

\n", - "example": 420, - "start_line": 6869, - "end_line": 6873, + "example": 442, + "start_line": 7129, + "end_line": 7133, "section": "Emphasis and strong emphasis" }, { "markdown": "***foo**\n", "html": "

*foo

\n", - "example": 421, - "start_line": 6876, - "end_line": 6880, + "example": 443, + "start_line": 7136, + "end_line": 7140, "section": "Emphasis and strong emphasis" }, { "markdown": "****foo*\n", "html": "

***foo

\n", - "example": 422, - "start_line": 6883, - "end_line": 6887, + "example": 444, + "start_line": 7143, + "end_line": 7147, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo***\n", "html": "

foo*

\n", - "example": 423, - "start_line": 6890, - "end_line": 6894, + "example": 445, + "start_line": 7150, + "end_line": 7154, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo****\n", "html": "

foo***

\n", - "example": 424, - "start_line": 6897, - "end_line": 6901, + "example": 446, + "start_line": 7157, + "end_line": 7161, "section": "Emphasis and strong emphasis" }, { "markdown": "foo ___\n", "html": "

foo ___

\n", - "example": 425, - "start_line": 6907, - "end_line": 6911, + "example": 447, + "start_line": 7167, + "end_line": 7171, "section": "Emphasis and strong emphasis" }, { "markdown": "foo _\\__\n", "html": "

foo _

\n", - "example": 426, - "start_line": 6914, - "end_line": 6918, + "example": 448, + "start_line": 7174, + "end_line": 7178, "section": "Emphasis and strong emphasis" }, { "markdown": "foo _*_\n", "html": "

foo *

\n", - "example": 427, - "start_line": 6921, - "end_line": 6925, + "example": 449, + "start_line": 7181, + "end_line": 7185, "section": "Emphasis and strong emphasis" }, { "markdown": "foo _____\n", "html": "

foo _____

\n", - "example": 428, - "start_line": 6928, - "end_line": 6932, + "example": 450, + "start_line": 7188, + "end_line": 7192, "section": "Emphasis and strong emphasis" }, { "markdown": "foo __\\___\n", "html": "

foo _

\n", - "example": 429, - "start_line": 6935, - "end_line": 6939, + "example": 451, + "start_line": 7195, + "end_line": 7199, "section": "Emphasis and strong emphasis" }, { "markdown": "foo __*__\n", "html": "

foo *

\n", - "example": 430, - "start_line": 6942, - "end_line": 6946, + "example": 452, + "start_line": 7202, + "end_line": 7206, "section": "Emphasis and strong emphasis" }, { "markdown": "__foo_\n", "html": "

_foo

\n", - "example": 431, - "start_line": 6949, - "end_line": 6953, + "example": 453, + "start_line": 7209, + "end_line": 7213, "section": "Emphasis and strong emphasis" }, { "markdown": "_foo__\n", "html": "

foo_

\n", - "example": 432, - "start_line": 6960, - "end_line": 6964, + "example": 454, + "start_line": 7220, + "end_line": 7224, "section": "Emphasis and strong emphasis" }, { "markdown": "___foo__\n", "html": "

_foo

\n", - "example": 433, - "start_line": 6967, - "end_line": 6971, + "example": 455, + "start_line": 7227, + "end_line": 7231, "section": "Emphasis and strong emphasis" }, { "markdown": "____foo_\n", "html": "

___foo

\n", - "example": 434, - "start_line": 6974, - "end_line": 6978, + "example": 456, + "start_line": 7234, + "end_line": 7238, "section": "Emphasis and strong emphasis" }, { "markdown": "__foo___\n", "html": "

foo_

\n", - "example": 435, - "start_line": 6981, - "end_line": 6985, + "example": 457, + "start_line": 7241, + "end_line": 7245, "section": "Emphasis and strong emphasis" }, { "markdown": "_foo____\n", "html": "

foo___

\n", - "example": 436, - "start_line": 6988, - "end_line": 6992, + "example": 458, + "start_line": 7248, + "end_line": 7252, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo**\n", "html": "

foo

\n", - "example": 437, - "start_line": 6998, - "end_line": 7002, + "example": 459, + "start_line": 7258, + "end_line": 7262, "section": "Emphasis and strong emphasis" }, { "markdown": "*_foo_*\n", "html": "

foo

\n", - "example": 438, - "start_line": 7005, - "end_line": 7009, + "example": 460, + "start_line": 7265, + "end_line": 7269, "section": "Emphasis and strong emphasis" }, { "markdown": "__foo__\n", "html": "

foo

\n", - "example": 439, - "start_line": 7012, - "end_line": 7016, + "example": 461, + "start_line": 7272, + "end_line": 7276, "section": "Emphasis and strong emphasis" }, { "markdown": "_*foo*_\n", "html": "

foo

\n", - "example": 440, - "start_line": 7019, - "end_line": 7023, + "example": 462, + "start_line": 7279, + "end_line": 7283, "section": "Emphasis and strong emphasis" }, { "markdown": "****foo****\n", "html": "

foo

\n", - "example": 441, - "start_line": 7029, - "end_line": 7033, + "example": 463, + "start_line": 7289, + "end_line": 7293, "section": "Emphasis and strong emphasis" }, { "markdown": "____foo____\n", "html": "

foo

\n", - "example": 442, - "start_line": 7036, - "end_line": 7040, + "example": 464, + "start_line": 7296, + "end_line": 7300, "section": "Emphasis and strong emphasis" }, { "markdown": "******foo******\n", "html": "

foo

\n", - "example": 443, - "start_line": 7047, - "end_line": 7051, + "example": 465, + "start_line": 7307, + "end_line": 7311, "section": "Emphasis and strong emphasis" }, { "markdown": "***foo***\n", "html": "

foo

\n", - "example": 444, - "start_line": 7056, - "end_line": 7060, + "example": 466, + "start_line": 7316, + "end_line": 7320, "section": "Emphasis and strong emphasis" }, { "markdown": "_____foo_____\n", "html": "

foo

\n", - "example": 445, - "start_line": 7063, - "end_line": 7067, + "example": 467, + "start_line": 7323, + "end_line": 7327, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo _bar* baz_\n", "html": "

foo _bar baz_

\n", - "example": 446, - "start_line": 7072, - "end_line": 7076, + "example": 468, + "start_line": 7332, + "end_line": 7336, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo __bar *baz bim__ bam*\n", "html": "

foo bar *baz bim bam

\n", - "example": 447, - "start_line": 7079, - "end_line": 7083, + "example": 469, + "start_line": 7339, + "end_line": 7343, "section": "Emphasis and strong emphasis" }, { "markdown": "**foo **bar baz**\n", "html": "

**foo bar baz

\n", - "example": 448, - "start_line": 7088, - "end_line": 7092, + "example": 470, + "start_line": 7348, + "end_line": 7352, "section": "Emphasis and strong emphasis" }, { "markdown": "*foo *bar baz*\n", "html": "

*foo bar baz

\n", - "example": 449, - "start_line": 7095, - "end_line": 7099, + "example": 471, + "start_line": 7355, + "end_line": 7359, "section": "Emphasis and strong emphasis" }, { "markdown": "*[bar*](/url)\n", "html": "

*bar*

\n", - "example": 450, - "start_line": 7104, - "end_line": 7108, + "example": 472, + "start_line": 7364, + "end_line": 7368, "section": "Emphasis and strong emphasis" }, { "markdown": "_foo [bar_](/url)\n", "html": "

_foo bar_

\n", - "example": 451, - "start_line": 7111, - "end_line": 7115, + "example": 473, + "start_line": 7371, + "end_line": 7375, "section": "Emphasis and strong emphasis" }, { "markdown": "*\n", "html": "

*

\n", - "example": 452, - "start_line": 7118, - "end_line": 7122, + "example": 474, + "start_line": 7378, + "end_line": 7382, "section": "Emphasis and strong emphasis" }, { "markdown": "**\n", "html": "

**

\n", - "example": 453, - "start_line": 7125, - "end_line": 7129, + "example": 475, + "start_line": 7385, + "end_line": 7389, "section": "Emphasis and strong emphasis" }, { "markdown": "__\n", "html": "

__

\n", - "example": 454, - "start_line": 7132, - "end_line": 7136, + "example": 476, + "start_line": 7392, + "end_line": 7396, "section": "Emphasis and strong emphasis" }, { "markdown": "*a `*`*\n", "html": "

a *

\n", - "example": 455, - "start_line": 7139, - "end_line": 7143, + "example": 477, + "start_line": 7399, + "end_line": 7403, "section": "Emphasis and strong emphasis" }, { "markdown": "_a `_`_\n", "html": "

a _

\n", - "example": 456, - "start_line": 7146, - "end_line": 7150, + "example": 478, + "start_line": 7406, + "end_line": 7410, "section": "Emphasis and strong emphasis" }, { "markdown": "**a\n", "html": "

**ahttp://foo.bar/?q=**

\n", - "example": 457, - "start_line": 7153, - "end_line": 7157, + "example": 479, + "start_line": 7413, + "end_line": 7417, "section": "Emphasis and strong emphasis" }, { "markdown": "__a\n", "html": "

__ahttp://foo.bar/?q=__

\n", - "example": 458, - "start_line": 7160, - "end_line": 7164, + "example": 480, + "start_line": 7420, + "end_line": 7424, "section": "Emphasis and strong emphasis" }, { "markdown": "[link](/uri \"title\")\n", "html": "

link

\n", - "example": 459, - "start_line": 7241, - "end_line": 7245, + "example": 481, + "start_line": 7503, + "end_line": 7507, "section": "Links" }, { "markdown": "[link](/uri)\n", "html": "

link

\n", - "example": 460, - "start_line": 7250, - "end_line": 7254, + "example": 482, + "start_line": 7512, + "end_line": 7516, "section": "Links" }, { "markdown": "[link]()\n", "html": "

link

\n", - "example": 461, - "start_line": 7259, - "end_line": 7263, + "example": 483, + "start_line": 7521, + "end_line": 7525, "section": "Links" }, { "markdown": "[link](<>)\n", "html": "

link

\n", - "example": 462, - "start_line": 7266, - "end_line": 7270, + "example": 484, + "start_line": 7528, + "end_line": 7532, "section": "Links" }, { "markdown": "[link](/my uri)\n", "html": "

[link](/my uri)

\n", - "example": 463, - "start_line": 7276, - "end_line": 7280, + "example": 485, + "start_line": 7537, + "end_line": 7541, "section": "Links" }, { "markdown": "[link](
)\n", - "html": "

[link](</my uri>)

\n", - "example": 464, - "start_line": 7283, - "end_line": 7287, + "html": "

link

\n", + "example": 486, + "start_line": 7543, + "end_line": 7547, "section": "Links" }, { "markdown": "[link](foo\nbar)\n", "html": "

[link](foo\nbar)

\n", - "example": 465, - "start_line": 7290, - "end_line": 7296, + "example": 487, + "start_line": 7552, + "end_line": 7558, "section": "Links" }, { "markdown": "[link]()\n", "html": "

[link]()

\n", - "example": 466, - "start_line": 7299, - "end_line": 7305, + "example": 488, + "start_line": 7560, + "end_line": 7566, + "section": "Links" + }, + { + "markdown": "[a]()\n", + "html": "

a

\n", + "example": 489, + "start_line": 7571, + "end_line": 7575, + "section": "Links" + }, + { + "markdown": "[link]()\n", + "html": "

[link](<foo>)

\n", + "example": 490, + "start_line": 7579, + "end_line": 7583, + "section": "Links" + }, + { + "markdown": "[a](\n[a](c)\n", + "html": "

[a](<b)c\n[a](<b)c>\n[a](c)

\n", + "example": 491, + "start_line": 7588, + "end_line": 7596, "section": "Links" }, { "markdown": "[link](\\(foo\\))\n", "html": "

link

\n", - "example": 467, - "start_line": 7309, - "end_line": 7313, + "example": 492, + "start_line": 7600, + "end_line": 7604, "section": "Links" }, { "markdown": "[link](foo(and(bar)))\n", "html": "

link

\n", - "example": 468, - "start_line": 7318, - "end_line": 7322, + "example": 493, + "start_line": 7609, + "end_line": 7613, "section": "Links" }, { "markdown": "[link](foo\\(and\\(bar\\))\n", "html": "

link

\n", - "example": 469, - "start_line": 7327, - "end_line": 7331, + "example": 494, + "start_line": 7618, + "end_line": 7622, "section": "Links" }, { "markdown": "[link]()\n", "html": "

link

\n", - "example": 470, - "start_line": 7334, - "end_line": 7338, + "example": 495, + "start_line": 7625, + "end_line": 7629, "section": "Links" }, { "markdown": "[link](foo\\)\\:)\n", "html": "

link

\n", - "example": 471, - "start_line": 7344, - "end_line": 7348, + "example": 496, + "start_line": 7635, + "end_line": 7639, "section": "Links" }, { "markdown": "[link](#fragment)\n\n[link](http://example.com#fragment)\n\n[link](http://example.com?foo=3#frag)\n", "html": "

link

\n

link

\n

link

\n", - "example": 472, - "start_line": 7353, - "end_line": 7363, + "example": 497, + "start_line": 7644, + "end_line": 7654, "section": "Links" }, { "markdown": "[link](foo\\bar)\n", "html": "

link

\n", - "example": 473, - "start_line": 7369, - "end_line": 7373, + "example": 498, + "start_line": 7660, + "end_line": 7664, "section": "Links" }, { "markdown": "[link](foo%20bä)\n", "html": "

link

\n", - "example": 474, - "start_line": 7385, - "end_line": 7389, + "example": 499, + "start_line": 7676, + "end_line": 7680, "section": "Links" }, { "markdown": "[link](\"title\")\n", "html": "

link

\n", - "example": 475, - "start_line": 7396, - "end_line": 7400, + "example": 500, + "start_line": 7687, + "end_line": 7691, "section": "Links" }, { "markdown": "[link](/url \"title\")\n[link](/url 'title')\n[link](/url (title))\n", "html": "

link\nlink\nlink

\n", - "example": 476, - "start_line": 7405, - "end_line": 7413, + "example": 501, + "start_line": 7696, + "end_line": 7704, "section": "Links" }, { "markdown": "[link](/url \"title \\\""\")\n", "html": "

link

\n", - "example": 477, - "start_line": 7419, - "end_line": 7423, + "example": 502, + "start_line": 7710, + "end_line": 7714, "section": "Links" }, { "markdown": "[link](/url \"title\")\n", "html": "

link

\n", - "example": 478, - "start_line": 7429, - "end_line": 7433, + "example": 503, + "start_line": 7720, + "end_line": 7724, "section": "Links" }, { "markdown": "[link](/url \"title \"and\" title\")\n", "html": "

[link](/url "title "and" title")

\n", - "example": 479, - "start_line": 7438, - "end_line": 7442, + "example": 504, + "start_line": 7729, + "end_line": 7733, "section": "Links" }, { "markdown": "[link](/url 'title \"and\" title')\n", "html": "

link

\n", - "example": 480, - "start_line": 7447, - "end_line": 7451, + "example": 505, + "start_line": 7738, + "end_line": 7742, "section": "Links" }, { "markdown": "[link]( /uri\n \"title\" )\n", "html": "

link

\n", - "example": 481, - "start_line": 7471, - "end_line": 7476, + "example": 506, + "start_line": 7762, + "end_line": 7767, "section": "Links" }, { "markdown": "[link] (/uri)\n", "html": "

[link] (/uri)

\n", - "example": 482, - "start_line": 7482, - "end_line": 7486, + "example": 507, + "start_line": 7773, + "end_line": 7777, "section": "Links" }, { "markdown": "[link [foo [bar]]](/uri)\n", "html": "

link [foo [bar]]

\n", - "example": 483, - "start_line": 7492, - "end_line": 7496, + "example": 508, + "start_line": 7783, + "end_line": 7787, "section": "Links" }, { "markdown": "[link] bar](/uri)\n", "html": "

[link] bar](/uri)

\n", - "example": 484, - "start_line": 7499, - "end_line": 7503, + "example": 509, + "start_line": 7790, + "end_line": 7794, "section": "Links" }, { "markdown": "[link [bar](/uri)\n", "html": "

[link bar

\n", - "example": 485, - "start_line": 7506, - "end_line": 7510, + "example": 510, + "start_line": 7797, + "end_line": 7801, "section": "Links" }, { "markdown": "[link \\[bar](/uri)\n", "html": "

link [bar

\n", - "example": 486, - "start_line": 7513, - "end_line": 7517, + "example": 511, + "start_line": 7804, + "end_line": 7808, "section": "Links" }, { "markdown": "[link *foo **bar** `#`*](/uri)\n", "html": "

link foo bar #

\n", - "example": 487, - "start_line": 7522, - "end_line": 7526, + "example": 512, + "start_line": 7813, + "end_line": 7817, "section": "Links" }, { "markdown": "[![moon](moon.jpg)](/uri)\n", "html": "

\"moon\"

\n", - "example": 488, - "start_line": 7529, - "end_line": 7533, + "example": 513, + "start_line": 7820, + "end_line": 7824, "section": "Links" }, { "markdown": "[foo [bar](/uri)](/uri)\n", "html": "

[foo bar](/uri)

\n", - "example": 489, - "start_line": 7538, - "end_line": 7542, + "example": 514, + "start_line": 7829, + "end_line": 7833, "section": "Links" }, { "markdown": "[foo *[bar [baz](/uri)](/uri)*](/uri)\n", "html": "

[foo [bar baz](/uri)](/uri)

\n", - "example": 490, - "start_line": 7545, - "end_line": 7549, + "example": 515, + "start_line": 7836, + "end_line": 7840, "section": "Links" }, { "markdown": "![[[foo](uri1)](uri2)](uri3)\n", "html": "

\"[foo](uri2)\"

\n", - "example": 491, - "start_line": 7552, - "end_line": 7556, + "example": 516, + "start_line": 7843, + "end_line": 7847, "section": "Links" }, { "markdown": "*[foo*](/uri)\n", "html": "

*foo*

\n", - "example": 492, - "start_line": 7562, - "end_line": 7566, + "example": 517, + "start_line": 7853, + "end_line": 7857, "section": "Links" }, { "markdown": "[foo *bar](baz*)\n", "html": "

foo *bar

\n", - "example": 493, - "start_line": 7569, - "end_line": 7573, + "example": 518, + "start_line": 7860, + "end_line": 7864, "section": "Links" }, { "markdown": "*foo [bar* baz]\n", "html": "

foo [bar baz]

\n", - "example": 494, - "start_line": 7579, - "end_line": 7583, + "example": 519, + "start_line": 7870, + "end_line": 7874, "section": "Links" }, { "markdown": "[foo \n", "html": "

[foo

\n", - "example": 495, - "start_line": 7589, - "end_line": 7593, + "example": 520, + "start_line": 7880, + "end_line": 7884, "section": "Links" }, { "markdown": "[foo`](/uri)`\n", "html": "

[foo](/uri)

\n", - "example": 496, - "start_line": 7596, - "end_line": 7600, + "example": 521, + "start_line": 7887, + "end_line": 7891, "section": "Links" }, { "markdown": "[foo\n", "html": "

[foohttp://example.com/?search=](uri)

\n", - "example": 497, - "start_line": 7603, - "end_line": 7607, + "example": 522, + "start_line": 7894, + "end_line": 7898, "section": "Links" }, { "markdown": "[foo][bar]\n\n[bar]: /url \"title\"\n", "html": "

foo

\n", - "example": 498, - "start_line": 7641, - "end_line": 7647, + "example": 523, + "start_line": 7932, + "end_line": 7938, "section": "Links" }, { "markdown": "[link [foo [bar]]][ref]\n\n[ref]: /uri\n", "html": "

link [foo [bar]]

\n", - "example": 499, - "start_line": 7656, - "end_line": 7662, + "example": 524, + "start_line": 7947, + "end_line": 7953, "section": "Links" }, { "markdown": "[link \\[bar][ref]\n\n[ref]: /uri\n", "html": "

link [bar

\n", - "example": 500, - "start_line": 7665, - "end_line": 7671, + "example": 525, + "start_line": 7956, + "end_line": 7962, "section": "Links" }, { "markdown": "[link *foo **bar** `#`*][ref]\n\n[ref]: /uri\n", "html": "

link foo bar #

\n", - "example": 501, - "start_line": 7676, - "end_line": 7682, + "example": 526, + "start_line": 7967, + "end_line": 7973, "section": "Links" }, { "markdown": "[![moon](moon.jpg)][ref]\n\n[ref]: /uri\n", "html": "

\"moon\"

\n", - "example": 502, - "start_line": 7685, - "end_line": 7691, + "example": 527, + "start_line": 7976, + "end_line": 7982, "section": "Links" }, { "markdown": "[foo [bar](/uri)][ref]\n\n[ref]: /uri\n", "html": "

[foo bar]ref

\n", - "example": 503, - "start_line": 7696, - "end_line": 7702, + "example": 528, + "start_line": 7987, + "end_line": 7993, "section": "Links" }, { "markdown": "[foo *bar [baz][ref]*][ref]\n\n[ref]: /uri\n", "html": "

[foo bar baz]ref

\n", - "example": 504, - "start_line": 7705, - "end_line": 7711, + "example": 529, + "start_line": 7996, + "end_line": 8002, "section": "Links" }, { "markdown": "*[foo*][ref]\n\n[ref]: /uri\n", - "html": "

*foo*

\n", - "example": 505, - "start_line": 7720, - "end_line": 7726, + "html": "

*foo*

\n", + "example": 530, + "start_line": 8011, + "end_line": 8017, "section": "Links" }, { "markdown": "[foo *bar][ref]\n\n[ref]: /uri\n", "html": "

foo *bar

\n", - "example": 506, - "start_line": 7729, - "end_line": 7735, + "example": 531, + "start_line": 8020, + "end_line": 8026, "section": "Links" }, { "markdown": "[foo \n\n[ref]: /uri\n", "html": "

[foo

\n", - "example": 507, - "start_line": 7741, - "end_line": 7747, + "example": 532, + "start_line": 8032, + "end_line": 8038, "section": "Links" }, { "markdown": "[foo`][ref]`\n\n[ref]: /uri\n", "html": "

[foo][ref]

\n", - "example": 508, - "start_line": 7750, - "end_line": 7756, + "example": 533, + "start_line": 8041, + "end_line": 8047, "section": "Links" }, { "markdown": "[foo\n\n[ref]: /uri\n", "html": "

[foohttp://example.com/?search=][ref]

\n", - "example": 509, - "start_line": 7759, - "end_line": 7765, + "example": 534, + "start_line": 8050, + "end_line": 8056, "section": "Links" }, { "markdown": "[foo][BaR]\n\n[bar]: /url \"title\"\n", "html": "

foo

\n", - "example": 510, - "start_line": 7770, - "end_line": 7776, + "example": 535, + "start_line": 8061, + "end_line": 8067, "section": "Links" }, { "markdown": "[Толпой][Толпой] is a Russian word.\n\n[ТОЛПОЙ]: /url\n", "html": "

Толпой is a Russian word.

\n", - "example": 511, - "start_line": 7781, - "end_line": 7787, + "example": 536, + "start_line": 8072, + "end_line": 8078, "section": "Links" }, { "markdown": "[Foo\n bar]: /url\n\n[Baz][Foo bar]\n", "html": "

Baz

\n", - "example": 512, - "start_line": 7793, - "end_line": 7800, + "example": 537, + "start_line": 8084, + "end_line": 8091, "section": "Links" }, { "markdown": "[foo] [bar]\n\n[bar]: /url \"title\"\n", "html": "

[foo] bar

\n", - "example": 513, - "start_line": 7806, - "end_line": 7812, + "example": 538, + "start_line": 8097, + "end_line": 8103, "section": "Links" }, { "markdown": "[foo]\n[bar]\n\n[bar]: /url \"title\"\n", "html": "

[foo]\nbar

\n", - "example": 514, - "start_line": 7815, - "end_line": 7823, + "example": 539, + "start_line": 8106, + "end_line": 8114, "section": "Links" }, { "markdown": "[foo]: /url1\n\n[foo]: /url2\n\n[bar][foo]\n", "html": "

bar

\n", - "example": 515, - "start_line": 7856, - "end_line": 7864, + "example": 540, + "start_line": 8147, + "end_line": 8155, "section": "Links" }, { "markdown": "[bar][foo\\!]\n\n[foo!]: /url\n", "html": "

[bar][foo!]

\n", - "example": 516, - "start_line": 7871, - "end_line": 7877, + "example": 541, + "start_line": 8162, + "end_line": 8168, "section": "Links" }, { "markdown": "[foo][ref[]\n\n[ref[]: /uri\n", "html": "

[foo][ref[]

\n

[ref[]: /uri

\n", - "example": 517, - "start_line": 7883, - "end_line": 7890, + "example": 542, + "start_line": 8174, + "end_line": 8181, "section": "Links" }, { "markdown": "[foo][ref[bar]]\n\n[ref[bar]]: /uri\n", "html": "

[foo][ref[bar]]

\n

[ref[bar]]: /uri

\n", - "example": 518, - "start_line": 7893, - "end_line": 7900, + "example": 543, + "start_line": 8184, + "end_line": 8191, "section": "Links" }, { "markdown": "[[[foo]]]\n\n[[[foo]]]: /url\n", "html": "

[[[foo]]]

\n

[[[foo]]]: /url

\n", - "example": 519, - "start_line": 7903, - "end_line": 7910, + "example": 544, + "start_line": 8194, + "end_line": 8201, "section": "Links" }, { "markdown": "[foo][ref\\[]\n\n[ref\\[]: /uri\n", "html": "

foo

\n", - "example": 520, - "start_line": 7913, - "end_line": 7919, + "example": 545, + "start_line": 8204, + "end_line": 8210, "section": "Links" }, { "markdown": "[bar\\\\]: /uri\n\n[bar\\\\]\n", "html": "

bar\\

\n", - "example": 521, - "start_line": 7924, - "end_line": 7930, + "example": 546, + "start_line": 8215, + "end_line": 8221, "section": "Links" }, { "markdown": "[]\n\n[]: /uri\n", "html": "

[]

\n

[]: /uri

\n", - "example": 522, - "start_line": 7935, - "end_line": 7942, + "example": 547, + "start_line": 8226, + "end_line": 8233, "section": "Links" }, { "markdown": "[\n ]\n\n[\n ]: /uri\n", "html": "

[\n]

\n

[\n]: /uri

\n", - "example": 523, - "start_line": 7945, - "end_line": 7956, + "example": 548, + "start_line": 8236, + "end_line": 8247, "section": "Links" }, { "markdown": "[foo][]\n\n[foo]: /url \"title\"\n", "html": "

foo

\n", - "example": 524, - "start_line": 7968, - "end_line": 7974, + "example": 549, + "start_line": 8259, + "end_line": 8265, "section": "Links" }, { "markdown": "[*foo* bar][]\n\n[*foo* bar]: /url \"title\"\n", "html": "

foo bar

\n", - "example": 525, - "start_line": 7977, - "end_line": 7983, + "example": 550, + "start_line": 8268, + "end_line": 8274, "section": "Links" }, { "markdown": "[Foo][]\n\n[foo]: /url \"title\"\n", "html": "

Foo

\n", - "example": 526, - "start_line": 7988, - "end_line": 7994, + "example": 551, + "start_line": 8279, + "end_line": 8285, "section": "Links" }, { "markdown": "[foo] \n[]\n\n[foo]: /url \"title\"\n", "html": "

foo\n[]

\n", - "example": 527, - "start_line": 8001, - "end_line": 8009, + "example": 552, + "start_line": 8292, + "end_line": 8300, "section": "Links" }, { "markdown": "[foo]\n\n[foo]: /url \"title\"\n", "html": "

foo

\n", - "example": 528, - "start_line": 8021, - "end_line": 8027, + "example": 553, + "start_line": 8312, + "end_line": 8318, "section": "Links" }, { "markdown": "[*foo* bar]\n\n[*foo* bar]: /url \"title\"\n", "html": "

foo bar

\n", - "example": 529, - "start_line": 8030, - "end_line": 8036, + "example": 554, + "start_line": 8321, + "end_line": 8327, "section": "Links" }, { "markdown": "[[*foo* bar]]\n\n[*foo* bar]: /url \"title\"\n", "html": "

[foo bar]

\n", - "example": 530, - "start_line": 8039, - "end_line": 8045, + "example": 555, + "start_line": 8330, + "end_line": 8336, "section": "Links" }, { "markdown": "[[bar [foo]\n\n[foo]: /url\n", "html": "

[[bar foo

\n", - "example": 531, - "start_line": 8048, - "end_line": 8054, + "example": 556, + "start_line": 8339, + "end_line": 8345, "section": "Links" }, { "markdown": "[Foo]\n\n[foo]: /url \"title\"\n", "html": "

Foo

\n", - "example": 532, - "start_line": 8059, - "end_line": 8065, + "example": 557, + "start_line": 8350, + "end_line": 8356, "section": "Links" }, { "markdown": "[foo] bar\n\n[foo]: /url\n", "html": "

foo bar

\n", - "example": 533, - "start_line": 8070, - "end_line": 8076, + "example": 558, + "start_line": 8361, + "end_line": 8367, "section": "Links" }, { "markdown": "\\[foo]\n\n[foo]: /url \"title\"\n", "html": "

[foo]

\n", - "example": 534, - "start_line": 8082, - "end_line": 8088, + "example": 559, + "start_line": 8373, + "end_line": 8379, "section": "Links" }, { "markdown": "[foo*]: /url\n\n*[foo*]\n", "html": "

*foo*

\n", - "example": 535, - "start_line": 8094, - "end_line": 8100, + "example": 560, + "start_line": 8385, + "end_line": 8391, "section": "Links" }, { "markdown": "[foo][bar]\n\n[foo]: /url1\n[bar]: /url2\n", "html": "

foo

\n", - "example": 536, - "start_line": 8106, - "end_line": 8113, + "example": 561, + "start_line": 8397, + "end_line": 8404, "section": "Links" }, { "markdown": "[foo][]\n\n[foo]: /url1\n", "html": "

foo

\n", - "example": 537, - "start_line": 8115, - "end_line": 8121, + "example": 562, + "start_line": 8406, + "end_line": 8412, "section": "Links" }, { "markdown": "[foo]()\n\n[foo]: /url1\n", "html": "

foo

\n", - "example": 538, - "start_line": 8125, - "end_line": 8131, + "example": 563, + "start_line": 8416, + "end_line": 8422, "section": "Links" }, { "markdown": "[foo](not a link)\n\n[foo]: /url1\n", "html": "

foo(not a link)

\n", - "example": 539, - "start_line": 8133, - "end_line": 8139, + "example": 564, + "start_line": 8424, + "end_line": 8430, "section": "Links" }, { "markdown": "[foo][bar][baz]\n\n[baz]: /url\n", "html": "

[foo]bar

\n", - "example": 540, - "start_line": 8144, - "end_line": 8150, + "example": 565, + "start_line": 8435, + "end_line": 8441, "section": "Links" }, { "markdown": "[foo][bar][baz]\n\n[baz]: /url1\n[bar]: /url2\n", "html": "

foobaz

\n", - "example": 541, - "start_line": 8156, - "end_line": 8163, + "example": 566, + "start_line": 8447, + "end_line": 8454, "section": "Links" }, { "markdown": "[foo][bar][baz]\n\n[baz]: /url1\n[foo]: /url2\n", "html": "

[foo]bar

\n", - "example": 542, - "start_line": 8169, - "end_line": 8176, + "example": 567, + "start_line": 8460, + "end_line": 8467, "section": "Links" }, { "markdown": "![foo](/url \"title\")\n", "html": "

\"foo\"

\n", - "example": 543, - "start_line": 8192, - "end_line": 8196, + "example": 568, + "start_line": 8483, + "end_line": 8487, "section": "Images" }, { "markdown": "![foo *bar*]\n\n[foo *bar*]: train.jpg \"train & tracks\"\n", "html": "

\"foo

\n", - "example": 544, - "start_line": 8199, - "end_line": 8205, + "example": 569, + "start_line": 8490, + "end_line": 8496, "section": "Images" }, { "markdown": "![foo ![bar](/url)](/url2)\n", "html": "

\"foo

\n", - "example": 545, - "start_line": 8208, - "end_line": 8212, + "example": 570, + "start_line": 8499, + "end_line": 8503, "section": "Images" }, { "markdown": "![foo [bar](/url)](/url2)\n", "html": "

\"foo

\n", - "example": 546, - "start_line": 8215, - "end_line": 8219, + "example": 571, + "start_line": 8506, + "end_line": 8510, "section": "Images" }, { "markdown": "![foo *bar*][]\n\n[foo *bar*]: train.jpg \"train & tracks\"\n", "html": "

\"foo

\n", - "example": 547, - "start_line": 8229, - "end_line": 8235, + "example": 572, + "start_line": 8520, + "end_line": 8526, "section": "Images" }, { "markdown": "![foo *bar*][foobar]\n\n[FOOBAR]: train.jpg \"train & tracks\"\n", "html": "

\"foo

\n", - "example": 548, - "start_line": 8238, - "end_line": 8244, + "example": 573, + "start_line": 8529, + "end_line": 8535, "section": "Images" }, { "markdown": "![foo](train.jpg)\n", "html": "

\"foo\"

\n", - "example": 549, - "start_line": 8247, - "end_line": 8251, + "example": 574, + "start_line": 8538, + "end_line": 8542, "section": "Images" }, { "markdown": "My ![foo bar](/path/to/train.jpg \"title\" )\n", "html": "

My \"foo

\n", - "example": 550, - "start_line": 8254, - "end_line": 8258, + "example": 575, + "start_line": 8545, + "end_line": 8549, "section": "Images" }, { "markdown": "![foo]()\n", "html": "

\"foo\"

\n", - "example": 551, - "start_line": 8261, - "end_line": 8265, + "example": 576, + "start_line": 8552, + "end_line": 8556, "section": "Images" }, { "markdown": "![](/url)\n", "html": "

\"\"

\n", - "example": 552, - "start_line": 8268, - "end_line": 8272, + "example": 577, + "start_line": 8559, + "end_line": 8563, "section": "Images" }, { "markdown": "![foo][bar]\n\n[bar]: /url\n", "html": "

\"foo\"

\n", - "example": 553, - "start_line": 8277, - "end_line": 8283, + "example": 578, + "start_line": 8568, + "end_line": 8574, "section": "Images" }, { "markdown": "![foo][bar]\n\n[BAR]: /url\n", "html": "

\"foo\"

\n", - "example": 554, - "start_line": 8286, - "end_line": 8292, + "example": 579, + "start_line": 8577, + "end_line": 8583, "section": "Images" }, { "markdown": "![foo][]\n\n[foo]: /url \"title\"\n", "html": "

\"foo\"

\n", - "example": 555, - "start_line": 8297, - "end_line": 8303, + "example": 580, + "start_line": 8588, + "end_line": 8594, "section": "Images" }, { "markdown": "![*foo* bar][]\n\n[*foo* bar]: /url \"title\"\n", "html": "

\"foo

\n", - "example": 556, - "start_line": 8306, - "end_line": 8312, + "example": 581, + "start_line": 8597, + "end_line": 8603, "section": "Images" }, { "markdown": "![Foo][]\n\n[foo]: /url \"title\"\n", "html": "

\"Foo\"

\n", - "example": 557, - "start_line": 8317, - "end_line": 8323, + "example": 582, + "start_line": 8608, + "end_line": 8614, "section": "Images" }, { "markdown": "![foo] \n[]\n\n[foo]: /url \"title\"\n", "html": "

\"foo\"\n[]

\n", - "example": 558, - "start_line": 8329, - "end_line": 8337, + "example": 583, + "start_line": 8620, + "end_line": 8628, "section": "Images" }, { "markdown": "![foo]\n\n[foo]: /url \"title\"\n", "html": "

\"foo\"

\n", - "example": 559, - "start_line": 8342, - "end_line": 8348, + "example": 584, + "start_line": 8633, + "end_line": 8639, "section": "Images" }, { "markdown": "![*foo* bar]\n\n[*foo* bar]: /url \"title\"\n", "html": "

\"foo

\n", - "example": 560, - "start_line": 8351, - "end_line": 8357, + "example": 585, + "start_line": 8642, + "end_line": 8648, "section": "Images" }, { "markdown": "![[foo]]\n\n[[foo]]: /url \"title\"\n", "html": "

![[foo]]

\n

[[foo]]: /url "title"

\n", - "example": 561, - "start_line": 8362, - "end_line": 8369, + "example": 586, + "start_line": 8653, + "end_line": 8660, "section": "Images" }, { "markdown": "![Foo]\n\n[foo]: /url \"title\"\n", "html": "

\"Foo\"

\n", - "example": 562, - "start_line": 8374, - "end_line": 8380, + "example": 587, + "start_line": 8665, + "end_line": 8671, "section": "Images" }, { "markdown": "!\\[foo]\n\n[foo]: /url \"title\"\n", "html": "

![foo]

\n", - "example": 563, - "start_line": 8386, - "end_line": 8392, + "example": 588, + "start_line": 8677, + "end_line": 8683, "section": "Images" }, { "markdown": "\\![foo]\n\n[foo]: /url \"title\"\n", "html": "

!foo

\n", - "example": 564, - "start_line": 8398, - "end_line": 8404, + "example": 589, + "start_line": 8689, + "end_line": 8695, "section": "Images" }, { "markdown": "\n", "html": "

http://foo.bar.baz

\n", - "example": 565, - "start_line": 8431, - "end_line": 8435, + "example": 590, + "start_line": 8722, + "end_line": 8726, "section": "Autolinks" }, { "markdown": "\n", "html": "

http://foo.bar.baz/test?q=hello&id=22&boolean

\n", - "example": 566, - "start_line": 8438, - "end_line": 8442, + "example": 591, + "start_line": 8729, + "end_line": 8733, "section": "Autolinks" }, { "markdown": "\n", "html": "

irc://foo.bar:2233/baz

\n", - "example": 567, - "start_line": 8445, - "end_line": 8449, + "example": 592, + "start_line": 8736, + "end_line": 8740, "section": "Autolinks" }, { "markdown": "\n", "html": "

MAILTO:FOO@BAR.BAZ

\n", - "example": 568, - "start_line": 8454, - "end_line": 8458, + "example": 593, + "start_line": 8745, + "end_line": 8749, "section": "Autolinks" }, { "markdown": "\n", "html": "

a+b+c:d

\n", - "example": 569, - "start_line": 8466, - "end_line": 8470, + "example": 594, + "start_line": 8757, + "end_line": 8761, "section": "Autolinks" }, { "markdown": "\n", "html": "

made-up-scheme://foo,bar

\n", - "example": 570, - "start_line": 8473, - "end_line": 8477, + "example": 595, + "start_line": 8764, + "end_line": 8768, "section": "Autolinks" }, { "markdown": "\n", "html": "

http://../

\n", - "example": 571, - "start_line": 8480, - "end_line": 8484, + "example": 596, + "start_line": 8771, + "end_line": 8775, "section": "Autolinks" }, { "markdown": "\n", "html": "

localhost:5001/foo

\n", - "example": 572, - "start_line": 8487, - "end_line": 8491, + "example": 597, + "start_line": 8778, + "end_line": 8782, "section": "Autolinks" }, { "markdown": "\n", "html": "

<http://foo.bar/baz bim>

\n", - "example": 573, - "start_line": 8496, - "end_line": 8500, + "example": 598, + "start_line": 8787, + "end_line": 8791, "section": "Autolinks" }, { "markdown": "\n", "html": "

http://example.com/\\[\\

\n", - "example": 574, - "start_line": 8505, - "end_line": 8509, + "example": 599, + "start_line": 8796, + "end_line": 8800, "section": "Autolinks" }, { "markdown": "\n", "html": "

foo@bar.example.com

\n", - "example": 575, - "start_line": 8527, - "end_line": 8531, + "example": 600, + "start_line": 8818, + "end_line": 8822, "section": "Autolinks" }, { "markdown": "\n", "html": "

foo+special@Bar.baz-bar0.com

\n", - "example": 576, - "start_line": 8534, - "end_line": 8538, + "example": 601, + "start_line": 8825, + "end_line": 8829, "section": "Autolinks" }, { "markdown": "\n", "html": "

<foo+@bar.example.com>

\n", - "example": 577, - "start_line": 8543, - "end_line": 8547, + "example": 602, + "start_line": 8834, + "end_line": 8838, "section": "Autolinks" }, { "markdown": "<>\n", "html": "

<>

\n", - "example": 578, - "start_line": 8552, - "end_line": 8556, + "example": 603, + "start_line": 8843, + "end_line": 8847, "section": "Autolinks" }, { "markdown": "< http://foo.bar >\n", "html": "

< http://foo.bar >

\n", - "example": 579, - "start_line": 8559, - "end_line": 8563, + "example": 604, + "start_line": 8850, + "end_line": 8854, "section": "Autolinks" }, { "markdown": "\n", "html": "

<m:abc>

\n", - "example": 580, - "start_line": 8566, - "end_line": 8570, + "example": 605, + "start_line": 8857, + "end_line": 8861, "section": "Autolinks" }, { "markdown": "\n", "html": "

<foo.bar.baz>

\n", - "example": 581, - "start_line": 8573, - "end_line": 8577, + "example": 606, + "start_line": 8864, + "end_line": 8868, "section": "Autolinks" }, { "markdown": "http://example.com\n", "html": "

http://example.com

\n", - "example": 582, - "start_line": 8580, - "end_line": 8584, + "example": 607, + "start_line": 8871, + "end_line": 8875, "section": "Autolinks" }, { "markdown": "foo@bar.example.com\n", "html": "

foo@bar.example.com

\n", - "example": 583, - "start_line": 8587, - "end_line": 8591, + "example": 608, + "start_line": 8878, + "end_line": 8882, "section": "Autolinks" }, { "markdown": "\n", "html": "

\n", - "example": 584, - "start_line": 8669, - "end_line": 8673, + "example": 609, + "start_line": 8960, + "end_line": 8964, "section": "Raw HTML" }, { "markdown": "\n", "html": "

\n", - "example": 585, - "start_line": 8678, - "end_line": 8682, + "example": 610, + "start_line": 8969, + "end_line": 8973, "section": "Raw HTML" }, { "markdown": "\n", "html": "

\n", - "example": 586, - "start_line": 8687, - "end_line": 8693, + "example": 611, + "start_line": 8978, + "end_line": 8984, "section": "Raw HTML" }, { "markdown": "\n", "html": "

\n", - "example": 587, - "start_line": 8698, - "end_line": 8704, + "example": 612, + "start_line": 8989, + "end_line": 8995, "section": "Raw HTML" }, { "markdown": "Foo \n", "html": "

Foo

\n", - "example": 588, - "start_line": 8709, - "end_line": 8713, + "example": 613, + "start_line": 9000, + "end_line": 9004, "section": "Raw HTML" }, { "markdown": "<33> <__>\n", "html": "

<33> <__>

\n", - "example": 589, - "start_line": 8718, - "end_line": 8722, + "example": 614, + "start_line": 9009, + "end_line": 9013, "section": "Raw HTML" }, { "markdown": "
\n", "html": "

<a h*#ref="hi">

\n", - "example": 590, - "start_line": 8727, - "end_line": 8731, + "example": 615, + "start_line": 9018, + "end_line": 9022, "section": "Raw HTML" }, { "markdown": "
\n", "html": "

<a href="hi'> <a href=hi'>

\n", - "example": 591, - "start_line": 8736, - "end_line": 8740, + "example": 616, + "start_line": 9027, + "end_line": 9031, "section": "Raw HTML" }, { - "markdown": "< a><\nfoo>\n", - "html": "

< a><\nfoo><bar/ >

\n", - "example": 592, - "start_line": 8745, - "end_line": 8751, + "markdown": "< a><\nfoo>\n\n", + "html": "

< a><\nfoo><bar/ >\n<foo bar=baz\nbim!bop />

\n", + "example": 617, + "start_line": 9036, + "end_line": 9046, "section": "Raw HTML" }, { "markdown": "
\n", "html": "

<a href='bar'title=title>

\n", - "example": 593, - "start_line": 8756, - "end_line": 8760, + "example": 618, + "start_line": 9051, + "end_line": 9055, "section": "Raw HTML" }, { "markdown": "
\n", "html": "

\n", - "example": 594, - "start_line": 8765, - "end_line": 8769, + "example": 619, + "start_line": 9060, + "end_line": 9064, "section": "Raw HTML" }, { "markdown": "\n", "html": "

</a href="foo">

\n", - "example": 595, - "start_line": 8774, - "end_line": 8778, + "example": 620, + "start_line": 9069, + "end_line": 9073, "section": "Raw HTML" }, { "markdown": "foo \n", "html": "

foo

\n", - "example": 596, - "start_line": 8783, - "end_line": 8789, + "example": 621, + "start_line": 9078, + "end_line": 9084, "section": "Raw HTML" }, { "markdown": "foo \n", "html": "

foo <!-- not a comment -- two hyphens -->

\n", - "example": 597, - "start_line": 8792, - "end_line": 8796, + "example": 622, + "start_line": 9087, + "end_line": 9091, "section": "Raw HTML" }, { "markdown": "foo foo -->\n\nfoo \n", "html": "

foo <!--> foo -->

\n

foo <!-- foo--->

\n", - "example": 598, - "start_line": 8801, - "end_line": 8808, + "example": 623, + "start_line": 9096, + "end_line": 9103, "section": "Raw HTML" }, { "markdown": "foo \n", "html": "

foo

\n", - "example": 599, - "start_line": 8813, - "end_line": 8817, + "example": 624, + "start_line": 9108, + "end_line": 9112, "section": "Raw HTML" }, { "markdown": "foo \n", "html": "

foo

\n", - "example": 600, - "start_line": 8822, - "end_line": 8826, + "example": 625, + "start_line": 9117, + "end_line": 9121, "section": "Raw HTML" }, { "markdown": "foo &<]]>\n", "html": "

foo &<]]>

\n", - "example": 601, - "start_line": 8831, - "end_line": 8835, + "example": 626, + "start_line": 9126, + "end_line": 9130, "section": "Raw HTML" }, { "markdown": "foo \n", "html": "

foo

\n", - "example": 602, - "start_line": 8841, - "end_line": 8845, + "example": 627, + "start_line": 9136, + "end_line": 9140, "section": "Raw HTML" }, { "markdown": "foo \n", "html": "

foo

\n", - "example": 603, - "start_line": 8850, - "end_line": 8854, + "example": 628, + "start_line": 9145, + "end_line": 9149, "section": "Raw HTML" }, { "markdown": "\n", "html": "

<a href=""">

\n", - "example": 604, - "start_line": 8857, - "end_line": 8861, + "example": 629, + "start_line": 9152, + "end_line": 9156, "section": "Raw HTML" }, { "markdown": "foo \nbaz\n", "html": "

foo
\nbaz

\n", - "example": 605, - "start_line": 8871, - "end_line": 8877, + "example": 630, + "start_line": 9166, + "end_line": 9172, "section": "Hard line breaks" }, { "markdown": "foo\\\nbaz\n", "html": "

foo
\nbaz

\n", - "example": 606, - "start_line": 8883, - "end_line": 8889, + "example": 631, + "start_line": 9178, + "end_line": 9184, "section": "Hard line breaks" }, { "markdown": "foo \nbaz\n", "html": "

foo
\nbaz

\n", - "example": 607, - "start_line": 8894, - "end_line": 8900, + "example": 632, + "start_line": 9189, + "end_line": 9195, "section": "Hard line breaks" }, { "markdown": "foo \n bar\n", "html": "

foo
\nbar

\n", - "example": 608, - "start_line": 8905, - "end_line": 8911, + "example": 633, + "start_line": 9200, + "end_line": 9206, "section": "Hard line breaks" }, { "markdown": "foo\\\n bar\n", "html": "

foo
\nbar

\n", - "example": 609, - "start_line": 8914, - "end_line": 8920, + "example": 634, + "start_line": 9209, + "end_line": 9215, "section": "Hard line breaks" }, { "markdown": "*foo \nbar*\n", "html": "

foo
\nbar

\n", - "example": 610, - "start_line": 8926, - "end_line": 8932, + "example": 635, + "start_line": 9221, + "end_line": 9227, "section": "Hard line breaks" }, { "markdown": "*foo\\\nbar*\n", "html": "

foo
\nbar

\n", - "example": 611, - "start_line": 8935, - "end_line": 8941, + "example": 636, + "start_line": 9230, + "end_line": 9236, "section": "Hard line breaks" }, { - "markdown": "`code \nspan`\n", - "html": "

code span

\n", - "example": 612, - "start_line": 8946, - "end_line": 8951, + "markdown": "`code \nspan`\n", + "html": "

code span

\n", + "example": 637, + "start_line": 9241, + "end_line": 9246, "section": "Hard line breaks" }, { "markdown": "`code\\\nspan`\n", "html": "

code\\ span

\n", - "example": 613, - "start_line": 8954, - "end_line": 8959, + "example": 638, + "start_line": 9249, + "end_line": 9254, "section": "Hard line breaks" }, { "markdown": "
\n", "html": "

\n", - "example": 614, - "start_line": 8964, - "end_line": 8970, + "example": 639, + "start_line": 9259, + "end_line": 9265, "section": "Hard line breaks" }, { "markdown": "\n", "html": "

\n", - "example": 615, - "start_line": 8973, - "end_line": 8979, + "example": 640, + "start_line": 9268, + "end_line": 9274, "section": "Hard line breaks" }, { "markdown": "foo\\\n", "html": "

foo\\

\n", - "example": 616, - "start_line": 8986, - "end_line": 8990, + "example": 641, + "start_line": 9281, + "end_line": 9285, "section": "Hard line breaks" }, { "markdown": "foo \n", "html": "

foo

\n", - "example": 617, - "start_line": 8993, - "end_line": 8997, + "example": 642, + "start_line": 9288, + "end_line": 9292, "section": "Hard line breaks" }, { "markdown": "### foo\\\n", "html": "

foo\\

\n", - "example": 618, - "start_line": 9000, - "end_line": 9004, + "example": 643, + "start_line": 9295, + "end_line": 9299, "section": "Hard line breaks" }, { "markdown": "### foo \n", "html": "

foo

\n", - "example": 619, - "start_line": 9007, - "end_line": 9011, + "example": 644, + "start_line": 9302, + "end_line": 9306, "section": "Hard line breaks" }, { "markdown": "foo\nbaz\n", "html": "

foo\nbaz

\n", - "example": 620, - "start_line": 9022, - "end_line": 9028, + "example": 645, + "start_line": 9317, + "end_line": 9323, "section": "Soft line breaks" }, { "markdown": "foo \n baz\n", "html": "

foo\nbaz

\n", - "example": 621, - "start_line": 9034, - "end_line": 9040, + "example": 646, + "start_line": 9329, + "end_line": 9335, "section": "Soft line breaks" }, { "markdown": "hello $.;'there\n", "html": "

hello $.;'there

\n", - "example": 622, - "start_line": 9054, - "end_line": 9058, + "example": 647, + "start_line": 9349, + "end_line": 9353, "section": "Textual content" }, { "markdown": "Foo χρῆν\n", "html": "

Foo χρῆν

\n", - "example": 623, - "start_line": 9061, - "end_line": 9065, + "example": 648, + "start_line": 9356, + "end_line": 9360, "section": "Textual content" }, { "markdown": "Multiple spaces\n", "html": "

Multiple spaces

\n", - "example": 624, - "start_line": 9070, - "end_line": 9074, + "example": 649, + "start_line": 9365, + "end_line": 9369, "section": "Textual content" } -] +] \ No newline at end of file diff --git a/tests/test_commonmark/spec.sh b/tests/test_commonmark/spec.sh index 2750ef00..034fd86e 100755 --- a/tests/test_commonmark/spec.sh +++ b/tests/test_commonmark/spec.sh @@ -3,7 +3,7 @@ set -e REPO="https://github.com/commonmark/CommonMark.git" -VERSION="0.28" +VERSION="0.29" function main { echo "Cloning from repo: $REPO..." diff --git a/tests/test_commonmark/test_commonmark.py b/tests/test_commonmark/test_commonmark.py index ae1123aa..ca2c8b75 100644 --- a/tests/test_commonmark/test_commonmark.py +++ b/tests/test_commonmark/test_commonmark.py @@ -6,8 +6,7 @@ import pytest -from myst_parser.block_tokens import Document -from myst_parser.html_renderer import HTMLRenderer +from myst_parser.main import to_html with open(os.path.join(os.path.dirname(__file__), "commonmark.json"), "r") as fin: tests = json.load(fin) @@ -18,14 +17,35 @@ def test_commonmark(entry): if entry["example"] == 14: # This is just a test that +++ are not parsed as thematic breaks pytest.skip("Expects '+++' to be unconverted (not block break).") - if entry["example"] in [65, 67]: + if entry["example"] in [66, 68]: # Front matter is supported by numerous Markdown flavours, # but not strictly CommonMark, # see: https://talk.commonmark.org/t/metadata-in-documents/721/86 pytest.skip( "Thematic breaks on the first line conflict with front matter syntax" ) - test_case = entry["markdown"].splitlines(keepends=True) - with HTMLRenderer() as renderer: - output = renderer.render(Document.read(test_case)) + if entry["example"] in [108, 334]: + # TODO fix failing empty code span tests (awaiting upstream); + # ``` ``` -> not + pytest.skip("empty code span spacing") + if entry["example"] in [ + 171, # [foo]: /url\\bar\\*baz \"foo\\\"bar\\baz\"\n\n[foo]\n + 306, # \n + 308, # [foo](/bar\\* \"ti\\*tle\")\n + 309, # [foo]\n\n[foo]: /bar\\* \"ti\\*tle\"\n + 310, # ``` foo\\+bar\nfoo\n```\n + 502, # [link](/url \"title \\\""\")\n + 599, # \n + ]: + # TODO fix url backslash escaping (awaiting upstream) + pytest.skip("url backslash escaping") + test_case = entry["markdown"] + output = to_html(test_case) + + if entry["example"] in [187, 209, 210]: + # this doesn't have any bearing on the output + output = output.replace( + "
", "
\n
" + ) + assert output == entry["html"] diff --git a/tests/test_renderers/conftest.py b/tests/test_renderers/conftest.py deleted file mode 100644 index a885643e..00000000 --- a/tests/test_renderers/conftest.py +++ /dev/null @@ -1,26 +0,0 @@ -from unittest import mock - -import pytest - -from myst_parser.docutils_renderer import SphinxRenderer - - -@pytest.fixture -def renderer(): - renderer = SphinxRenderer() - with renderer: - yield renderer - - -@pytest.fixture -def sphinx_renderer(): - with SphinxRenderer(load_sphinx_env=True) as renderer: - yield renderer - - -@pytest.fixture -def renderer_mock(): - renderer = SphinxRenderer() - renderer.render_inner = mock.Mock(return_value="inner") - with renderer: - yield renderer diff --git a/tests/test_renderers/fixtures/basic.md b/tests/test_renderers/fixtures/basic.md new file mode 100644 index 00000000..a4fb469d --- /dev/null +++ b/tests/test_renderers/fixtures/basic.md @@ -0,0 +1,556 @@ +--------------------------- +Raw +. +foo +. + + + foo +. + +--------------------------- +Strong: +. +**foo** +. + + + + foo +. + +--------------------------- +Emphasis +. +*foo* +. + + + + foo +. + +--------------------------- +Escaped Emphasis: +. +\*foo* +. + + + *foo* +. + +-------------------------- +Mixed Inline +. +a *b* **c** `abc` \\* +. + + + a + + b + + + c + + + abc + \* +. + +-------------------------- +Inline Code: +. +`foo` +. + + + + foo +. + +-------------------------- +Heading: +. +# foo +. + +
+ + foo +. + +-------------------------- +Heading Levels: +. +# a +## b +### c +# d +. +<document source="notset"> + <section ids="a" names="a"> + <title> + a + <section ids="b" names="b"> + <title> + b + <section ids="c" names="c"> + <title> + c + <section ids="d" names="d"> + <title> + d +. + +-------------------------- +Block Code: +. +```sh +foo +``` +. +<document source="notset"> + <literal_block language="sh" xml:space="preserve"> + foo +. + +-------------------------- +Block Code no language: +. +``` +foo +``` +. +<document source="notset"> + <literal_block language="" xml:space="preserve"> + foo +. + +-------------------------- +Image empty: +. +![]() +. +<document source="notset"> + <paragraph> + <image alt="" uri=""> +. + +-------------------------- +Image with alt and title: +. +![alt](src "title") +. +<document source="notset"> + <paragraph> + <image alt="alt" uri="src"> +. + +-------------------------- +Image with escapable html: +. +![alt](http://www.google<>.com) +. +<document source="notset"> + <paragraph> + <image alt="alt" uri="http://www.google%3C%3E.com"> +. + +-------------------------- +Block Quote: +. +> *foo* +. +<document source="notset"> + <block_quote> + <paragraph> + <emphasis> + foo +. + +-------------------------- +Bullet List: +. +- *foo* +. +<document source="notset"> + <bullet_list> + <list_item> + <paragraph> + <emphasis> + foo +. + +-------------------------- +Nested Bullets +. +- a + - b + - c + - d +. +<document source="notset"> + <bullet_list> + <list_item> + <paragraph> + a + <bullet_list> + <list_item> + <paragraph> + b + <bullet_list> + <list_item> + <paragraph> + c + <list_item> + <paragraph> + d +. + +-------------------------- +Enumerated List: +. +1. *foo* +. +<document source="notset"> + <enumerated_list> + <list_item> + <paragraph> + <emphasis> + foo +. + +-------------------------- +Nested Enumrated List: +. +1. a +2. b + 1. c +. +<document source="notset"> + <enumerated_list> + <list_item> + <paragraph> + a + <list_item> + <paragraph> + b + <enumerated_list> + <list_item> + <paragraph> + c +. + +-------------------------- +Inline Math: +. +$foo$ +. +<document source="notset"> + <paragraph> + <math> + foo +. + +-------------------------- +Math Block: +. +$$foo$$ +. +<document source="notset"> + <math_block nowrap="False" number="True" xml:space="preserve"> + foo +. + +-------------------------- +Sphinx Role containing backtick: +. +{code}``a=1{`}`` +. +<document source="notset"> + <paragraph> + <literal classes="code"> + a=1{`} +. + +-------------------------- +Target: +. +(target)= +. +<document source="notset"> + <target ids="target" names="target"> +. + +-------------------------- +Referencing: +. +(target)= + +Title +----- + +[alt1](target) + +[](target2) + +[alt2](https://www.google.com) + +[alt3](#target3) +. +<document source="notset"> + <target ids="target" names="target"> + <section ids="title" names="title"> + <title> + Title + <paragraph> + <pending_xref refdomain="True" refexplicit="True" reftarget="target" reftype="any" refwarn="True"> + <literal classes="xref any"> + alt1 + <paragraph> + <pending_xref refdomain="True" refexplicit="False" reftarget="target2" reftype="any" refwarn="True"> + <literal classes="xref any"> + <paragraph> + <reference refuri="https://www.google.com"> + alt2 + <paragraph> + <reference refuri="#target3"> + alt3 +. + +-------------------------- +Comments: +. +line 1 +% a comment +line 2 +. +<document source="notset"> + <paragraph> + line 1 + <comment xml:space="preserve"> + a comment + <paragraph> + line 2 +. + +-------------------------- +Block Break: +. ++++ string +. +<document source="notset"> + <comment classes="block_break" xml:space="preserve"> + string +. + +-------------------------- +Link Reference: +. +[name][key] + +[key]: https://www.google.com "a title" +. +<document source="notset"> + <paragraph> + <reference refuri="https://www.google.com" title="a title"> + name +. + +-------------------------- +Link Reference short version: +. +[name] + +[name]: https://www.google.com "a title" +. +<document source="notset"> + <paragraph> + <reference refuri="https://www.google.com" title="a title"> + name +. + +-------------------------- +Block Quotes: +. +```{epigraph} +a b*c* + +-- a**b** +``` +. +<document source="notset"> + <block_quote classes="epigraph"> + <paragraph> + a b + <emphasis> + c + <attribution> + a + <strong> + b +. + +-------------------------- +Link Definition in directive: +. +```{note} +[a] +``` + +[a]: link +. +<document source="notset"> + <note> + <paragraph> + <pending_xref refdomain="True" refexplicit="True" reftarget="link" reftype="any" refwarn="True"> + <literal classes="xref any"> + a +. + +-------------------------- +Link Definition in nested directives: +. +```{note} +[ref1]: link +``` + +```{note} +[ref1] +[ref2] +``` + +```{note} +[ref2]: link +``` +. +<document source="notset"> + <note> + <note> + <paragraph> + <pending_xref refdomain="True" refexplicit="True" reftarget="link" reftype="any" refwarn="True"> + <literal classes="xref any"> + ref1 + + [ref2] + <note> +. + +-------------------------- +Footnotes: +. +[^a] + +[^a]: footnote*text* +. +<document source="notset"> + <paragraph> + <footnote_reference auto="1" ids="id1" refname="a"> + <transition> + <footnote auto="1" ids="a" names="a"> + <paragraph> + footnote + <emphasis> + text +. + +-------------------------- +Front Matter: +. +--- +a: 1 +b: foo +c: + d: 2 +--- +. +<document source="notset"> + <docinfo> + <field> + <field_name> + a + <field_body> + 1 + <field> + <field_name> + b + <field_body> + foo + <field> + <field_name> + c + <field_body> + {"d": 2} +. + +-------------------------- +Full Test: +. +--- +a: 1 +--- + +(target)= +# header 1 +## sub header 1 + +a *b* **c** `abc` + +## sub header 2 + +x y [a](http://www.xyz.com) z + +--- + +# header 2 + +```::python {a=1} +a = 1 +``` + +[](target) +. +<document source="notset"> + <docinfo> + <field> + <field_name> + a + <field_body> + 1 + <target ids="target" names="target"> + <section ids="header-1" names="header\ 1"> + <title> + header 1 + <section ids="sub-header-1" names="sub\ header\ 1"> + <title> + sub header 1 + <paragraph> + a + <emphasis> + b + + <strong> + c + + <literal> + abc + <section ids="sub-header-2" names="sub\ header\ 2"> + <title> + sub header 2 + <paragraph> + x y + <reference refuri="http://www.xyz.com"> + a + z + <transition> + <section ids="header-2" names="header\ 2"> + <title> + header 2 + <literal_block language="::python" xml:space="preserve"> + a = 1 + <paragraph> + <pending_xref refdomain="True" refexplicit="False" reftarget="target" reftype="any" refwarn="True"> + <literal classes="xref any"> +. diff --git a/tests/test_renderers/fixtures/docutil_directives.md b/tests/test_renderers/fixtures/docutil_directives.md new file mode 100644 index 00000000..e61a4601 --- /dev/null +++ b/tests/test_renderers/fixtures/docutil_directives.md @@ -0,0 +1,430 @@ +-------------------------------- +attention (`docutils.parsers.rst.directives.admonitions.Attention`): +. +```{attention} + +a +``` +. +<document source="notset"> + <attention> + <paragraph> + a +. + +-------------------------------- +caution (`docutils.parsers.rst.directives.admonitions.Caution`): +. +```{caution} + +a +``` +. +<document source="notset"> + <caution> + <paragraph> + a +. + +-------------------------------- +danger (`docutils.parsers.rst.directives.admonitions.Danger`): +. +```{danger} + +a +``` +. +<document source="notset"> + <danger> + <paragraph> + a +. + +-------------------------------- +error (`docutils.parsers.rst.directives.admonitions.Error`): +. +```{error} + +a +``` +. +<document source="notset"> + <error> + <paragraph> + a +. + +-------------------------------- +important (`docutils.parsers.rst.directives.admonitions.Important`): +. +```{important} + +a +``` +. +<document source="notset"> + <important> + <paragraph> + a +. + +-------------------------------- +note (`docutils.parsers.rst.directives.admonitions.Note`): +. +```{note} + +a +``` +. +<document source="notset"> + <note> + <paragraph> + a +. + +-------------------------------- +tip (`docutils.parsers.rst.directives.admonitions.Tip`): +. +```{tip} + +a +``` +. +<document source="notset"> + <tip> + <paragraph> + a +. + +-------------------------------- +hint (`docutils.parsers.rst.directives.admonitions.Hint`): +. +```{hint} + +a +``` +. +<document source="notset"> + <hint> + <paragraph> + a +. + +-------------------------------- +warning (`docutils.parsers.rst.directives.admonitions.Warning`): +. +```{warning} + +a +``` +. +<document source="notset"> + <warning> + <paragraph> + a +. + +-------------------------------- +admonition (`docutils.parsers.rst.directives.admonitions.Admonition`): +. +```{admonition} myclass + +a +``` +. +<document source="notset"> + <admonition classes="admonition-myclass"> + <title> + myclass + <paragraph> + a +. + +-------------------------------- +sidebar (`docutils.parsers.rst.directives.body.Sidebar`): +. +```{sidebar} sidebar title + +a +``` +. +<document source="notset"> + <sidebar> + <title> + sidebar title + <paragraph> + a +. + +-------------------------------- +topic (`docutils.parsers.rst.directives.body.Topic`): +. +```{topic} Topic Title + +a +``` +. +<document source="notset"> + <topic> + <title> + Topic Title + <paragraph> + a +. + +-------------------------------- +line-block (`docutils.parsers.rst.directives.body.LineBlock`): +SKIP: MockingError: MockState has not yet implemented attribute 'nest_line_block_lines' +. +```{line-block} + + +``` +. +<document source="notset"> +. + +-------------------------------- +parsed-literal (`docutils.parsers.rst.directives.body.ParsedLiteral`): +. +```{parsed-literal} + +a +``` +. +<document source="notset"> + <literal_block xml:space="preserve"> + a +. + +-------------------------------- +rubric (`docutils.parsers.rst.directives.body.Rubric`): +. +```{rubric} Rubric Title +``` +. +<document source="notset"> + <rubric> + Rubric Title +. + +-------------------------------- +epigraph (`docutils.parsers.rst.directives.body.Epigraph`): +. +```{epigraph} + +a + +-- attribution +``` +. +<document source="notset"> + <block_quote classes="epigraph"> + <paragraph> + a + <attribution> + attribution +. + +-------------------------------- +highlights (`docutils.parsers.rst.directives.body.Highlights`): +. +```{highlights} + +a + +-- attribution +``` +. +<document source="notset"> + <block_quote classes="highlights"> + <paragraph> + a + <attribution> + attribution +. + +-------------------------------- +pull-quote (`docutils.parsers.rst.directives.body.PullQuote`): +. +```{pull-quote} + +a + +-- attribution +``` +. +<document source="notset"> + <block_quote classes="pull-quote"> + <paragraph> + a + <attribution> + attribution +. + +-------------------------------- +compound (`docutils.parsers.rst.directives.body.Compound`): +. +```{compound} + +a +``` +. +<document source="notset"> + <compound> + <paragraph> + a +. + +-------------------------------- +container (`docutils.parsers.rst.directives.body.Container`): +. +```{container} + +a +``` +. +<document source="notset"> + <container> + <paragraph> + a +. + +-------------------------------- +image (`docutils.parsers.rst.directives.images.Image`): +. +```{image} path/to/image +``` +. +<document source="notset"> + <image uri="path/to/image"> +. + +-------------------------------- +raw (`docutils.parsers.rst.directives.misc.Raw`): +. +```{raw} raw + +a +``` +. +<document source="notset"> + <raw format="raw" xml:space="preserve"> + a +. + +-------------------------------- +class (`docutils.parsers.rst.directives.misc.Class`): +. +```{class} myclass + +a +``` +. +<document source="notset"> + <paragraph classes="myclass"> + a +. + +-------------------------------- +role (`docutils.parsers.rst.directives.misc.Role`): +SKIP: MockingError: MockState has not yet implemented attribute 'parse_directive_block' +. +```{role} abc +``` +. +<document source="notset"> +. + +-------------------------------- +title (`docutils.parsers.rst.directives.misc.Title`): +. +```{title} title +``` +. +<document source="notset" title="title"> +. + +-------------------------------- +restructuredtext-test-directive (`docutils.parsers.rst.directives.misc.TestDirective`): +. +```{restructuredtext-test-directive} +``` +. +<document source="notset"> + <system_message level="1" line="1" source="notset" type="INFO"> + <paragraph> + Directive processed. Type="restructuredtext-test-directive", arguments=[], options={}, content: None +. + +-------------------------------- +contents (`docutils.parsers.rst.directives.parts.Contents`): +. +```{contents} Contents +``` +. +<document source="notset"> + <topic classes="contents" ids="contents" names="contents"> + <title> + Contents + <pending> + .. internal attributes: + .transform: docutils.transforms.parts.Contents + .details: +. + +-------------------------------- +sectnum (`docutils.parsers.rst.directives.parts.Sectnum`): +. +```{sectnum} +``` +. +<document source="notset"> + <pending> + .. internal attributes: + .transform: docutils.transforms.parts.SectNum + .details: +. + +-------------------------------- +header (`docutils.parsers.rst.directives.parts.Header`): +. +```{header} + +a +``` +. +<document source="notset"> + <decoration> + <header> + <paragraph> + a +. + +-------------------------------- +footer (`docutils.parsers.rst.directives.parts.Footer`): +. +```{footer} + +a +``` +. +<document source="notset"> + <decoration> + <footer> + <paragraph> + a +. + +-------------------------------- +target-notes (`docutils.parsers.rst.directives.references.TargetNotes`): +. +```{target-notes} +``` +. +<document source="notset"> + <pending> + .. internal attributes: + .transform: docutils.transforms.references.TargetNotes + .details: +. diff --git a/tests/test_renderers/fixtures/docutil_roles.md b/tests/test_renderers/fixtures/docutil_roles.md new file mode 100644 index 00000000..36bdfc75 --- /dev/null +++ b/tests/test_renderers/fixtures/docutil_roles.md @@ -0,0 +1,141 @@ +-------------------------------- +abbreviation (`docutils.parsers.rst.roles.GenericRole`): +. +{abbreviation}`a` +. +<document source="notset"> + <paragraph> + <abbreviation> + a +. + +-------------------------------- +acronym (`docutils.parsers.rst.roles.GenericRole`): +. +{acronym}`a` +. +<document source="notset"> + <paragraph> + <acronym> + a +. + +-------------------------------- +emphasis (`docutils.parsers.rst.roles.GenericRole`): +. +{emphasis}`a` +. +<document source="notset"> + <paragraph> + <emphasis> + a +. + +-------------------------------- +literal (`docutils.parsers.rst.roles.GenericRole`): +. +{literal}`a` +. +<document source="notset"> + <paragraph> + <literal> + a +. + +-------------------------------- +strong (`docutils.parsers.rst.roles.GenericRole`): +. +{strong}`a` +. +<document source="notset"> + <paragraph> + <strong> + a +. + +-------------------------------- +subscript (`docutils.parsers.rst.roles.GenericRole`): +. +{subscript}`a` +. +<document source="notset"> + <paragraph> + <subscript> + a +. + +-------------------------------- +superscript (`docutils.parsers.rst.roles.GenericRole`): +. +{superscript}`a` +. +<document source="notset"> + <paragraph> + <superscript> + a +. + +-------------------------------- +title-reference (`docutils.parsers.rst.roles.GenericRole`): +. +{title-reference}`t` +. +<document source="notset"> + <paragraph> + <title_reference> + t +. + +-------------------------------- +pep-reference (`docutils.parsers.rst.roles.pep_reference_role`): +. +{pep-reference}`0` +. +<document source="notset"> + <paragraph> + <reference refuri="http://www.python.org/dev/peps/pep-0000"> + PEP 0 +. + +-------------------------------- +rfc-reference (`docutils.parsers.rst.roles.rfc_reference_role`): +. +{rfc-reference}`1` +. +<document source="notset"> + <paragraph> + <reference refuri="http://tools.ietf.org/html/rfc1.html"> + RFC 1 +. + +-------------------------------- +raw (`docutils.parsers.rst.roles.raw_role`): +. +{raw}`` +. +<document source="notset"> + <paragraph> + {raw}`` +. + +-------------------------------- +code (`docutils.parsers.rst.roles.code_role`): +. +{code}`a` +. +<document source="notset"> + <paragraph> + <literal classes="code"> + a +. + +-------------------------------- +math (`docutils.parsers.rst.roles.math_role`): +. +{math}`a` +. +<document source="notset"> + <paragraph> + <math> + a +. diff --git a/tests/test_renderers/fixtures/role_options.md b/tests/test_renderers/fixtures/role_options.md new file mode 100644 index 00000000..55f2b79f --- /dev/null +++ b/tests/test_renderers/fixtures/role_options.md @@ -0,0 +1,154 @@ +Test Role 1: +. +```{restructuredtext-test-directive} +``` +. +<document source="notset"> + <system_message level="1" line="1" source="notset" type="INFO"> + <paragraph> + Directive processed. Type="restructuredtext-test-directive", arguments=[], options={}, content: None +. + +----------------------------- +Test Role 2: +. +```{restructuredtext-test-directive} +foo +``` +. +<document source="notset"> + <system_message level="1" line="1" source="notset" type="INFO"> + <paragraph> + Directive processed. Type="restructuredtext-test-directive", arguments=[], options={}, content: + <literal_block xml:space="preserve"> + foo +. + +----------------------------- +Test Role 3: +. +```{restructuredtext-test-directive} foo +``` +. +<document source="notset"> + <system_message level="1" line="1" source="notset" type="INFO"> + <paragraph> + Directive processed. Type="restructuredtext-test-directive", arguments=['foo'], options={}, content: None +. + +----------------------------- +Test Role 4: +. +```{restructuredtext-test-directive} foo +bar +``` +. +<document source="notset"> + <system_message level="1" line="1" source="notset" type="INFO"> + <paragraph> + Directive processed. Type="restructuredtext-test-directive", arguments=['foo'], options={}, content: + <literal_block xml:space="preserve"> + bar +. + +----------------------------- +Test Role 5: +. +```{restructuredtext-test-directive} foo bar +``` +. +<document source="notset"> + <system_message level="1" line="1" source="notset" type="INFO"> + <paragraph> + Directive processed. Type="restructuredtext-test-directive", arguments=['foo bar'], options={}, content: None +. + +----------------------------- +Test Role 6: +. +```{restructuredtext-test-directive} foo bar +baz +``` +. +<document source="notset"> + <system_message level="1" line="1" source="notset" type="INFO"> + <paragraph> + Directive processed. Type="restructuredtext-test-directive", arguments=['foo bar'], options={}, content: + <literal_block xml:space="preserve"> + baz +. + +----------------------------- +Test Role 7: +. +```{restructuredtext-test-directive} + +foo +``` +. +<document source="notset"> + <system_message level="1" line="1" source="notset" type="INFO"> + <paragraph> + Directive processed. Type="restructuredtext-test-directive", arguments=[], options={}, content: + <literal_block xml:space="preserve"> + foo +. + +----------------------------- +Test Role Options 1: +. +```{restructuredtext-test-directive} +--- +option1: a +option2: b +--- +foo +``` +. +<document source="notset"> + <system_message level="1" line="1" source="notset" type="INFO"> + <paragraph> + Directive processed. Type="restructuredtext-test-directive", arguments=[], options={'option1': 'a', 'option2': 'b'}, content: + <literal_block xml:space="preserve"> + foo +. + +----------------------------- +Test Role Options 2: +. +```{restructuredtext-test-directive} +:option1: a +:option2: b +foo +``` +. +<document source="notset"> + <system_message level="1" line="1" source="notset" type="INFO"> + <paragraph> + Directive processed. Type="restructuredtext-test-directive", arguments=[], options={'option1': 'a', 'option2': 'b'}, content: + <literal_block xml:space="preserve"> + foo +. + +----------------------------- +Test Role Options Error: +. +```{restructuredtext-test-directive} +:option1 +:option2: b +foo +``` +. +<document source="notset"> + <system_message level="3" line="1" source="notset" type="ERROR"> + <paragraph> + Directive 'restructuredtext-test-directive': + Invalid options YAML: mapping values are not allowed here + in "<unicode string>", line 2, column 8: + option2: b + ^ + <literal_block xml:space="preserve"> + :option1 + :option2: b + foo +. diff --git a/tests/test_renderers/fixtures/sphinx_directives.md b/tests/test_renderers/fixtures/sphinx_directives.md new file mode 100644 index 00000000..c199a634 --- /dev/null +++ b/tests/test_renderers/fixtures/sphinx_directives.md @@ -0,0 +1,483 @@ +-------------------------------- +default-role (`sphinx.directives.DefaultRole`): +. +```{default-role} +``` +. +<document source="notset"> +. + +-------------------------------- +default-domain (`sphinx.directives.DefaultDomain`): +. +```{default-domain} mydomain +``` +. +<document source="notset"> +. + +-------------------------------- +describe (`sphinx.directives.ObjectDescription`): +. +```{describe} something +``` +. +<document source="notset"> + <index entries=""> + <desc desctype="describe" domain="" noindex="False" objtype="describe"> + <desc_signature first="False"> + <desc_name xml:space="preserve"> + something + <desc_content> +. + +-------------------------------- +object (`sphinx.directives.ObjectDescription`): +. +```{object} something +``` +. +<document source="notset"> + <index entries=""> + <desc desctype="object" domain="" noindex="False" objtype="object"> + <desc_signature first="False"> + <desc_name xml:space="preserve"> + something + <desc_content> +. + +-------------------------------- +highlight (`sphinx.directives.code.Highlight`): +. +```{highlight} something +``` +. +<document source="notset"> + <highlightlang force="False" lang="something" linenothreshold="9223372036854775807"> +. + +-------------------------------- +code-block (`sphinx.directives.code.CodeBlock`): +. +```{code-block} + +a=1 +``` +. +<document source="notset"> + <literal_block force="False" highlight_args="{}" language="default" xml:space="preserve"> + a=1 +. + +-------------------------------- +sourcecode (`sphinx.directives.code.CodeBlock`): +. +```{sourcecode} +``` +. +<document source="notset"> + <literal_block force="False" highlight_args="{}" language="default" xml:space="preserve"> +. + +-------------------------------- +literalinclude (`sphinx.directives.code.LiteralInclude`): +SKIP: Tested in sphinx builds +. +```{literalinclude} /path/to/file +``` +. +<document source="notset"> + <system_message level="2" line="1" source="notset" type="WARNING"> + <paragraph> + Include file '/srcdir/path/to/file' not found or reading it failed +. + +-------------------------------- +toctree (`sphinx.directives.other.TocTree`): +. +```{toctree} +``` +. +<document source="notset"> + <compound classes="toctree-wrapper"> + <toctree caption="True" entries="" glob="False" hidden="False" includefiles="" includehidden="False" maxdepth="-1" numbered="0" parent="mock_docname" titlesonly="False"> +. + +-------------------------------- +sectionauthor (`sphinx.directives.other.Author`): +. +```{sectionauthor} bob geldof +``` +. +<document source="notset"> +. + +-------------------------------- +moduleauthor (`sphinx.directives.other.Author`): +. +```{moduleauthor} ringo starr +``` +. +<document source="notset"> +. + +-------------------------------- +codeauthor (`sphinx.directives.other.Author`): +. +```{codeauthor} paul mcartney +``` +. +<document source="notset"> +. + +-------------------------------- +index (`sphinx.directives.other.Index`): +. +```{index} something +``` +. +<document source="notset"> + <index entries="('single',\ 'something',\ 'index-0',\ '',\ None)" inline="False"> + <target ids="index-0"> +. + +-------------------------------- +seealso (`sphinx.directives.other.SeeAlso`): +. +```{seealso} + +a +``` +. +<document source="notset"> + <seealso> + <paragraph> + a +. + +-------------------------------- +tabularcolumns (`sphinx.directives.other.TabularColumns`): +. +```{tabularcolumns} spec +``` +. +<document source="notset"> + <tabular_col_spec spec="spec"> +. + +-------------------------------- +centered (`sphinx.directives.other.Centered`): +. +```{centered} text +``` +. +<document source="notset"> + <centered> + text +. + +-------------------------------- +acks (`sphinx.directives.other.Acks`): +. +```{acks} + +- name +``` +. +<document source="notset"> + <acks> + <bullet_list> + <list_item> + <paragraph> + name +. + +-------------------------------- +hlist (`sphinx.directives.other.HList`): +. +```{hlist} + +- item +``` +. +<document source="notset"> + <hlist> + <hlistcol> + <bullet_list> + <list_item> + <paragraph> + item + <hlistcol> + <bullet_list> +. + +-------------------------------- +only (`sphinx.directives.other.Only`): +. +```{only} expr +``` +. +<document source="notset"> + <only expr="expr"> +. + +-------------------------------- +include (`sphinx.directives.other.Include`): +SKIP: Tested in sphinx builds +. +```{include} path/to/include +``` +. +<document source="notset"> +. + +-------------------------------- +figure (`sphinx.directives.patches.Figure`): +. +```{figure} path/to/figure + +caption + +legend +``` +. +<document source="notset"> + <figure> + <image uri="path/to/figure"> + <caption> + caption + <legend> + <paragraph> + legend +. + +-------------------------------- +meta (`sphinx.directives.patches.Meta`): +SKIP: MockingError: MockState has not yet implemented attribute 'nested_list_parse' +. +```{meta} +foo +``` +. +<document source="notset"> +. + +-------------------------------- +table (`sphinx.directives.patches.RSTTable`): +SKIP: Need to implement tables render +. +```{table} + +| a | b | +|---|---| +| 1 | 2 | +``` +. +<document source="notset"> + <table classes="colwidths-auto"> + <tgroup cols="2"> + <colspec colwidth="50.0"> + <colspec colwidth="50.0"> + <thead> + <row> + <entry> + a + <entry> + b + <tbody> + <row> + <entry> + 1 + <entry> + 2 +. + +-------------------------------- +csv-table (`sphinx.directives.patches.CSVTable`): +. +```{csv-table} + +"Albatross", 2.99, "On a stick!" +``` +. +<document source="notset"> + <table> + <tgroup cols="3"> + <colspec colwidth="33"> + <colspec colwidth="33"> + <colspec colwidth="33"> + <tbody> + <row> + <entry> + <paragraph> + Albatross + <entry> + <paragraph> + 2.99 + <entry> + <paragraph> + On a stick! +. + +-------------------------------- +list-table (`sphinx.directives.patches.ListTable`): +. +```{list-table} + +* - item +``` +. +<document source="notset"> + <table> + <tgroup cols="1"> + <colspec colwidth="100"> + <tbody> + <row> + <entry> + <paragraph> + item +. + +-------------------------------- +code (`sphinx.directives.patches.Code`): +. +```{code} python + +a +``` +. +<document source="notset"> + <literal_block force="False" highlight_args="{}" language="python" xml:space="preserve"> + a +. + +-------------------------------- +math (`sphinx.directives.patches.MathDirective`): +. +```{math} +``` +. +<document source="notset"> + <math_block docname="mock_docname" label="True" nowrap="False" number="True" xml:space="preserve"> +. + +-------------------------------- +deprecated (`sphinx.domains.changeset.VersionChange`): +. +```{deprecated} 0.3 +``` +. +<document source="notset"> + <versionmodified type="deprecated" version="0.3"> + <paragraph translatable="False"> + <inline classes="versionmodified deprecated"> + Deprecated since version 0.3. +. + +-------------------------------- +versionadded (`sphinx.domains.changeset.VersionChange`): +. +```{versionadded} 0.2 +``` +. +<document source="notset"> + <versionmodified type="versionadded" version="0.2"> + <paragraph translatable="False"> + <inline classes="versionmodified added"> + New in version 0.2. +. + +-------------------------------- +versionchanged (`sphinx.domains.changeset.VersionChange`): +. +```{versionchanged} 0.1 +``` +. +<document source="notset"> + <versionmodified type="versionchanged" version="0.1"> + <paragraph translatable="False"> + <inline classes="versionmodified changed"> + Changed in version 0.1. +. +-------------------------------- +glossary (`sphinx.domains.std.Glossary`): +. +```{glossary} + +term 1 : A +term 2 : B + Definition of both terms. +``` +. +<document source="notset"> + <glossary> + <definition_list classes="glossary"> + <definition_list_item> + <term ids="term-term-1"> + term 1 + <index entries="('single',\ 'term\ 1',\ 'term-term-1',\ 'main',\ 'A')"> + <term ids="term-term-2"> + term 2 + <index entries="('single',\ 'term\ 2',\ 'term-term-2',\ 'main',\ 'B')"> + <definition> + <paragraph> + Definition of both terms. +. + +-------------------------------- +productionlist (`sphinx.domains.std.ProductionList`): +. +```{productionlist} try_stmt: try1_stmt | try2_stmt +``` +. +<document source="notset"> + <productionlist> + <production ids="grammar-token-try-stmt" tokenname="try_stmt" xml:space="preserve"> + try1_stmt | try2_stmt +. + +-------------------------------- +cmdoption (`sphinx.domains.std.Cmdoption`): +. +```{cmdoption} a +``` +. +<document source="notset"> + <index entries="('pair',\ 'command\ line\ option;\ a',\ 'cmdoption-arg-a',\ '',\ None)"> + <desc desctype="cmdoption" domain="std" noindex="False" objtype="cmdoption"> + <desc_signature allnames="a" first="False" ids="cmdoption-arg-a" names="cmdoption-arg-a"> + <desc_name xml:space="preserve"> + a + <desc_addname xml:space="preserve"> + <desc_content> +. + +-------------------------------- +rst:directive (`sphinx.domains.rst.ReSTDirective`): +. +```{rst:directive} a +``` +. +<document source="notset"> + <index entries="('single',\ 'a\ (directive)',\ 'directive-a',\ '',\ None)"> + <desc desctype="directive" domain="rst" noindex="False" objtype="directive"> + <desc_signature first="False" ids="directive-a" names="directive-a"> + <desc_name xml:space="preserve"> + .. a:: + <desc_content> +. + +-------------------------------- +rst:directive:option (`sphinx.domains.rst.ReSTDirectiveOption`): +. +```{rst:directive:option} a +``` +. +<document source="notset"> + <index entries="('single',\ ':a:\ (directive\ option)',\ 'directive:option--a',\ '',\ 'A')"> + <desc desctype="directive:option" domain="rst" noindex="False" objtype="directive:option"> + <desc_signature first="False" ids="directive:option--a" names="directive:option--a"> + <desc_name xml:space="preserve"> + :a: + <desc_content> +. diff --git a/tests/test_renderers/fixtures/sphinx_roles.md b/tests/test_renderers/fixtures/sphinx_roles.md new file mode 100644 index 00000000..950c1276 --- /dev/null +++ b/tests/test_renderers/fixtures/sphinx_roles.md @@ -0,0 +1,672 @@ +-------------------------------- +c:func (`sphinx.domains.c.CXRefRole`): +. +{c:func}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="c" refexplicit="False" reftarget="a" reftype="func" refwarn="False"> + <literal classes="xref c c-func"> + a() +. + +-------------------------------- +c:member (`sphinx.domains.c.CObject`): +. +{c:member}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="c" refexplicit="False" reftarget="a" reftype="member" refwarn="False"> + <literal classes="xref c c-member"> + a +. + +-------------------------------- +c:macro (`sphinx.domains.c.CObject`): +. +{c:macro}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="c" refexplicit="False" reftarget="a" reftype="macro" refwarn="False"> + <literal classes="xref c c-macro"> + a +. + +-------------------------------- +c:data (`sphinx.domains.c.CXRefRole`): +. +{c:data}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="c" refexplicit="False" reftarget="a" reftype="data" refwarn="False"> + <literal classes="xref c c-data"> + a +. + +-------------------------------- +c:type (`sphinx.domains.c.CObject`): +. +{c:type}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="c" refexplicit="False" reftarget="a" reftype="type" refwarn="False"> + <literal classes="xref c c-type"> + a +. + +-------------------------------- +cpp:any (`sphinx.domains.cpp.CPPXRefRole`): +. +{cpp:any}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="cpp" refexplicit="False" reftarget="a" reftype="any" refwarn="False"> + <literal classes="xref cpp cpp-any"> + a +. + +-------------------------------- +cpp:class (`sphinx.domains.cpp.CPPClassObject`): +. +{cpp:class}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="cpp" refexplicit="False" reftarget="a" reftype="class" refwarn="False"> + <literal classes="xref cpp cpp-class"> + a +. + +-------------------------------- +cpp:struct (`sphinx.domains.cpp.CPPClassObject`): +. +{cpp:struct}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="cpp" refexplicit="False" reftarget="a" reftype="struct" refwarn="False"> + <literal classes="xref cpp cpp-struct"> + a +. + +-------------------------------- +cpp:union (`sphinx.domains.cpp.CPPUnionObject`): +. +{cpp:union}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="cpp" refexplicit="False" reftarget="a" reftype="union" refwarn="False"> + <literal classes="xref cpp cpp-union"> + a +. + +-------------------------------- +cpp:func (`sphinx.domains.cpp.CPPXRefRole`): +. +{cpp:func}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="cpp" refexplicit="False" reftarget="a" reftype="func" refwarn="False"> + <literal classes="xref cpp cpp-func"> + a() +. + +-------------------------------- +cpp:member (`sphinx.domains.cpp.CPPMemberObject`): +. +{cpp:member}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="cpp" refexplicit="False" reftarget="a" reftype="member" refwarn="False"> + <literal classes="xref cpp cpp-member"> + a +. + +-------------------------------- +cpp:var (`sphinx.domains.cpp.CPPMemberObject`): +. +{cpp:var}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="cpp" refexplicit="False" reftarget="a" reftype="var" refwarn="False"> + <literal classes="xref cpp cpp-var"> + a +. + +-------------------------------- +cpp:type (`sphinx.domains.cpp.CPPTypeObject`): +. +{cpp:type}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="cpp" refexplicit="False" reftarget="a" reftype="type" refwarn="False"> + <literal classes="xref cpp cpp-type"> + a +. + +-------------------------------- +cpp:concept (`sphinx.domains.cpp.CPPConceptObject`): +. +{cpp:concept}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="cpp" refexplicit="False" reftarget="a" reftype="concept" refwarn="False"> + <literal classes="xref cpp cpp-concept"> + a +. + +-------------------------------- +cpp:enum (`sphinx.domains.cpp.CPPEnumObject`): +. +{cpp:enum}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="cpp" refexplicit="False" reftarget="a" reftype="enum" refwarn="False"> + <literal classes="xref cpp cpp-enum"> + a +. + +-------------------------------- +cpp:enumerator (`sphinx.domains.cpp.CPPEnumeratorObject`): +. +{cpp:enumerator}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="cpp" refexplicit="False" reftarget="a" reftype="enumerator" refwarn="False"> + <literal classes="xref cpp cpp-enumerator"> + a +. + +-------------------------------- +cpp:expr (`sphinx.domains.cpp.CPPExprRole`): +. +{cpp:expr}`a` +. +<document source="notset"> + <paragraph> + <literal classes="xref cpp cpp-expr"> + <pending_xref classname="True" cpp:parent_key="" modname="True" refdomain="cpp" reftarget="a" reftype="identifier"> + a +. + +-------------------------------- +cpp:texpr (`sphinx.domains.cpp.CPPExprRole`): +. +{cpp:texpr}`a` +. +<document source="notset"> + <paragraph> + <inline classes="xref cpp cpp-texpr"> + <pending_xref classname="True" cpp:parent_key="" modname="True" refdomain="cpp" reftarget="a" reftype="identifier"> + a +. + +-------------------------------- +js:func (`sphinx.domains.javascript.JSXRefRole`): +. +{js:func}`a` +. +<document source="notset"> + <paragraph> + <pending_xref js:module="True" js:object="True" refdoc="mock_docname" refdomain="js" refexplicit="False" reftarget="a" reftype="func" refwarn="False"> + <literal classes="xref js js-func"> + a() +. + +-------------------------------- +js:meth (`sphinx.domains.javascript.JSXRefRole`): +. +{js:meth}`a` +. +<document source="notset"> + <paragraph> + <pending_xref js:module="True" js:object="True" refdoc="mock_docname" refdomain="js" refexplicit="False" reftarget="a" reftype="meth" refwarn="False"> + <literal classes="xref js js-meth"> + a() +. + +-------------------------------- +js:class (`sphinx.domains.javascript.JSConstructor`): +. +{js:class}`a` +. +<document source="notset"> + <paragraph> + <pending_xref js:module="True" js:object="True" refdoc="mock_docname" refdomain="js" refexplicit="False" reftarget="a" reftype="class" refwarn="False"> + <literal classes="xref js js-class"> + a() +. + +-------------------------------- +js:data (`sphinx.domains.javascript.JSObject`): +. +{js:data}`a` +. +<document source="notset"> + <paragraph> + <pending_xref js:module="True" js:object="True" refdoc="mock_docname" refdomain="js" refexplicit="False" reftarget="a" reftype="data" refwarn="False"> + <literal classes="xref js js-data"> + a +. + +-------------------------------- +js:attr (`sphinx.domains.javascript.JSXRefRole`): +. +{js:attr}`a` +. +<document source="notset"> + <paragraph> + <pending_xref js:module="True" js:object="True" refdoc="mock_docname" refdomain="js" refexplicit="False" reftarget="a" reftype="attr" refwarn="False"> + <literal classes="xref js js-attr"> + a +. + +-------------------------------- +js:mod (`sphinx.domains.javascript.JSXRefRole`): +. +{js:mod}`a` +. +<document source="notset"> + <paragraph> + <pending_xref js:module="True" js:object="True" refdoc="mock_docname" refdomain="js" refexplicit="False" reftarget="a" reftype="mod" refwarn="False"> + <literal classes="xref js js-mod"> + a +. + +-------------------------------- +eq (`sphinx.domains.math.MathReferenceRole`): +. +{eq}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="math" refexplicit="False" reftarget="a" reftype="eq" refwarn="True"> + <literal classes="xref eq"> + a +. + +-------------------------------- +math:numref (`sphinx.domains.math.MathReferenceRole`): +. +{math:numref}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="math" refexplicit="False" reftarget="a" reftype="numref" refwarn="False"> + <literal classes="xref math math-numref"> + a +. + +-------------------------------- +py:data (`sphinx.domains.python.PyVariable`): +. +{py:data}`a` +. +<document source="notset"> + <paragraph> + <pending_xref py:class="True" py:module="True" refdoc="mock_docname" refdomain="py" refexplicit="False" reftarget="a" reftype="data" refwarn="False"> + <literal classes="xref py py-data"> + a +. + +-------------------------------- +py:exc (`sphinx.domains.python.PyXRefRole`): +. +{py:exc}`a` +. +<document source="notset"> + <paragraph> + <pending_xref py:class="True" py:module="True" refdoc="mock_docname" refdomain="py" refexplicit="False" reftarget="a" reftype="exc" refwarn="False"> + <literal classes="xref py py-exc"> + a +. + +-------------------------------- +py:func (`sphinx.domains.python.PyXRefRole`): +. +{py:func}`a` +. +<document source="notset"> + <paragraph> + <pending_xref py:class="True" py:module="True" refdoc="mock_docname" refdomain="py" refexplicit="False" reftarget="a" reftype="func" refwarn="False"> + <literal classes="xref py py-func"> + a() +. + +-------------------------------- +py:class (`sphinx.domains.python.PyClasslike`): +. +{py:class}`a` +. +<document source="notset"> + <paragraph> + <pending_xref py:class="True" py:module="True" refdoc="mock_docname" refdomain="py" refexplicit="False" reftarget="a" reftype="class" refwarn="False"> + <literal classes="xref py py-class"> + a +. + +-------------------------------- +py:const (`sphinx.domains.python.PyXRefRole`): +. +{py:const}`a` +. +<document source="notset"> + <paragraph> + <pending_xref py:class="True" py:module="True" refdoc="mock_docname" refdomain="py" refexplicit="False" reftarget="a" reftype="const" refwarn="False"> + <literal classes="xref py py-const"> + a +. + +-------------------------------- +py:attr (`sphinx.domains.python.PyXRefRole`): +. +{py:attr}`a` +. +<document source="notset"> + <paragraph> + <pending_xref py:class="True" py:module="True" refdoc="mock_docname" refdomain="py" refexplicit="False" reftarget="a" reftype="attr" refwarn="False"> + <literal classes="xref py py-attr"> + a +. + +-------------------------------- +py:meth (`sphinx.domains.python.PyXRefRole`): +. +{py:meth}`a` +. +<document source="notset"> + <paragraph> + <pending_xref py:class="True" py:module="True" refdoc="mock_docname" refdomain="py" refexplicit="False" reftarget="a" reftype="meth" refwarn="False"> + <literal classes="xref py py-meth"> + a() +. + +-------------------------------- +py:mod (`sphinx.domains.python.PyXRefRole`): +. +{py:mod}`a` +. +<document source="notset"> + <paragraph> + <pending_xref py:class="True" py:module="True" refdoc="mock_docname" refdomain="py" refexplicit="False" reftarget="a" reftype="mod" refwarn="False"> + <literal classes="xref py py-mod"> + a +. + +-------------------------------- +py:obj (`sphinx.domains.python.PyXRefRole`): +. +{py:obj}`a` +. +<document source="notset"> + <paragraph> + <pending_xref py:class="True" py:module="True" refdoc="mock_docname" refdomain="py" refexplicit="False" reftarget="a" reftype="obj" refwarn="False"> + <literal classes="xref py py-obj"> + a +. + +-------------------------------- +rst:role (`sphinx.domains.rst.ReSTRole`): +. +{rst:role}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="rst" refexplicit="False" reftarget="a" reftype="role" refwarn="False"> + <literal classes="xref rst rst-role"> + a +. + +-------------------------------- +program (`sphinx.domains.std.Program`): +. +{program}`a` +. +<document source="notset"> + <paragraph> + <literal_strong classes="program"> + a +. + +-------------------------------- +option (`sphinx.domains.std.Cmdoption`): +. +{option}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="std" refexplicit="False" reftarget="a" reftype="option" refwarn="True" std:program="True"> + <literal classes="xref std std-option"> + a +. + +-------------------------------- +envvar (`sphinx.domains.std.EnvVarXRefRole`): +. +{envvar}`a` +. +<document source="notset"> + <paragraph> + <index entries="('single',\ 'a',\ 'index-0',\ '',\ None) ('single',\ 'environment\ variable;\ a',\ 'index-0',\ '',\ None)"> + <target ids="index-0"> + <pending_xref refdoc="mock_docname" refdomain="std" refexplicit="False" reftarget="a" reftype="envvar" refwarn="False"> + <literal classes="xref std std-envvar"> + a +. + +-------------------------------- +index (`sphinx.roles.Index`): +. +{index}`a` +. +<document source="notset"> + <paragraph> + <index entries="('single',\ 'a',\ 'index-0',\ '',\ None)"> + <target ids="index-0"> + a +. + +-------------------------------- +download (`sphinx.roles.XRefRole`): +. +{download}`a` +. +<document source="notset"> + <paragraph> + <download_reference refdoc="mock_docname" refdomain="" refexplicit="False" reftarget="a" reftype="download" refwarn="False"> + <literal classes="xref download"> + a +. + +-------------------------------- +any (`sphinx.roles.AnyXRefRole`): +. +{any}`a <alt text>` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="" refexplicit="True" reftarget="alt text" reftype="any" refwarn="True"> + <literal classes="xref any"> + a +. + +-------------------------------- +pep (`sphinx.roles.PEP`): +. +{pep}`1` +. +<document source="notset"> + <paragraph> + <index entries="('single',\ 'Python\ Enhancement\ Proposals;\ PEP\ 1',\ 'index-0',\ '',\ None)"> + <target ids="index-0"> + <reference classes="pep" internal="False" refuri="http://www.python.org/dev/peps/pep-0001"> + <strong> + PEP 1 +. + +-------------------------------- +rfc (`sphinx.roles.RFC`): +. +{rfc}`1` +. +<document source="notset"> + <paragraph> + <index entries="('single',\ 'RFC;\ RFC\ 1',\ 'index-0',\ '',\ None)"> + <target ids="index-0"> + <reference classes="rfc" internal="False" refuri="http://tools.ietf.org/html/rfc1.html"> + <strong> + RFC 1 +. + +-------------------------------- +guilabel (`sphinx.roles.GUILabel`): +. +{guilabel}`a` +. +<document source="notset"> + <paragraph> + <inline classes="guilabel" rawtext=":guilabel:`a`"> + a +. + +-------------------------------- +menuselection (`sphinx.roles.MenuSelection`): +. +{menuselection}`a` +. +<document source="notset"> + <paragraph> + <inline classes="menuselection" rawtext=":menuselection:`a`"> + a +. + +-------------------------------- +file (`sphinx.roles.EmphasizedLiteral`): +. +{file}`a` +. +<document source="notset"> + <paragraph> + <literal classes="file" role="file"> + a +. + +-------------------------------- +samp (`sphinx.roles.EmphasizedLiteral`): +. +{samp}`a` +. +<document source="notset"> + <paragraph> + <literal classes="samp" role="samp"> + a +. + +-------------------------------- +abbr (`sphinx.roles.Abbreviation`): +SKIP: Non-deterministic output +. +{abbr}`a` +. +<document source="notset"> + <paragraph> + <abbreviation class="<function class_option at 0x1079fb830>> + a +. + +-------------------------------- +rst:dir (`sphinx.roles.XRefRole`): +. +{rst:dir}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="rst" refexplicit="False" reftarget="a" reftype="dir" refwarn="False"> + <literal classes="xref rst rst-dir"> + a +. + +-------------------------------- +token (`sphinx.roles.XRefRole`): +. +{token}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="std" refexplicit="False" reftarget="a" reftype="token" refwarn="False"> + <literal classes="xref std std-token"> + a +. + +-------------------------------- +term (`sphinx.roles.XRefRole`): +. +{term}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="std" refexplicit="False" reftarget="a" reftype="term" refwarn="True"> + <inline classes="xref std std-term"> + a +. + +-------------------------------- +ref (`sphinx.roles.XRefRole`): +. +{ref}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="std" refexplicit="False" reftarget="a" reftype="ref" refwarn="True"> + <inline classes="xref std std-ref"> + a +. + +-------------------------------- +numref (`sphinx.roles.XRefRole`): +. +{numref}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="std" refexplicit="False" reftarget="a" reftype="numref" refwarn="True"> + <literal classes="xref std std-numref"> + a +. + +-------------------------------- +keyword (`sphinx.roles.XRefRole`): +. +{keyword}`a` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="std" refexplicit="False" reftarget="a" reftype="keyword" refwarn="True"> + <literal classes="xref std std-keyword"> + a +. + +-------------------------------- +doc (`sphinx.roles.XRefRole`): +. +{doc}`this lecture <heavy_tails>` +. +<document source="notset"> + <paragraph> + <pending_xref refdoc="mock_docname" refdomain="std" refexplicit="True" reftarget="heavy_tails" reftype="doc" refwarn="True"> + <inline classes="xref std std-doc"> + this lecture +. diff --git a/tests/test_renderers/sphinx_directives.json b/tests/test_renderers/sphinx_directives.json deleted file mode 100644 index f79cb502..00000000 --- a/tests/test_renderers/sphinx_directives.json +++ /dev/null @@ -1,595 +0,0 @@ -[ - { - "name": "attention", - "class": "docutils.parsers.rst.directives.admonitions.Attention", - "args": [], - "options": {}, - "content": "a", - "output": "<attention>\n <paragraph>\n a" - }, - { - "name": "caution", - "class": "docutils.parsers.rst.directives.admonitions.Caution", - "args": [], - "options": {}, - "content": "a", - "output": "<caution>\n <paragraph>\n a" - }, - { - "name": "danger", - "class": "docutils.parsers.rst.directives.admonitions.Danger", - "args": [], - "options": {}, - "content": "a", - "output": "<danger>\n <paragraph>\n a" - }, - { - "name": "error", - "class": "docutils.parsers.rst.directives.admonitions.Error", - "args": [], - "options": {}, - "content": "a", - "output": "<error>\n <paragraph>\n a" - }, - { - "name": "important", - "class": "docutils.parsers.rst.directives.admonitions.Important", - "args": [], - "options": {}, - "content": "a", - "output": "<important>\n <paragraph>\n a" - }, - { - "name": "note", - "class": "docutils.parsers.rst.directives.admonitions.Note", - "args": [], - "options": {}, - "content": "a", - "output": "<note>\n <paragraph>\n a" - }, - { - "name": "tip", - "class": "docutils.parsers.rst.directives.admonitions.Tip", - "args": [], - "options": {}, - "content": "a", - "output": "<tip>\n <paragraph>\n a" - }, - { - "name": "hint", - "class": "docutils.parsers.rst.directives.admonitions.Hint", - "args": [], - "options": {}, - "content": "a", - "output": "<hint>\n <paragraph>\n a" - }, - { - "name": "warning", - "class": "docutils.parsers.rst.directives.admonitions.Warning", - "args": [], - "options": {}, - "content": "a", - "output": "<warning>\n <paragraph>\n a" - }, - { - "name": "admonition", - "class": "docutils.parsers.rst.directives.admonitions.Admonition", - "args": [ - "myclass" - ], - "options": {}, - "content": "a", - "output": "<admonition classes=\"admonition-myclass\">\n <title>\n myclass\n <paragraph>\n a" - }, - { - "name": "sidebar", - "class": "docutils.parsers.rst.directives.body.Sidebar", - "args": [ - "sidebar title" - ], - "options": {}, - "content": "a", - "output": "<sidebar>\n <title>\n sidebar title\n <paragraph>\n a" - }, - { - "name": "topic", - "class": "docutils.parsers.rst.directives.body.Topic", - "args": [ - "Topic Title" - ], - "options": {}, - "content": "a", - "output": "<topic>\n <title>\n Topic Title\n <paragraph>\n a" - }, - { - "name": "line-block", - "class": "docutils.parsers.rst.directives.body.LineBlock", - "args": [], - "options": {}, - "output": "" - }, - { - "name": "parsed-literal", - "class": "docutils.parsers.rst.directives.body.ParsedLiteral", - "args": [], - "options": {}, - "content": "a", - "output": "<literal_block xml:space=\"preserve\">\n a" - }, - { - "name": "rubric", - "class": "docutils.parsers.rst.directives.body.Rubric", - "args": [ - "Rubric Title" - ], - "options": {}, - "output": "<rubric>\n Rubric Title" - }, - { - "name": "epigraph", - "class": "docutils.parsers.rst.directives.body.Epigraph", - "args": [], - "options": {}, - "content": "a\n\n-- attribution", - "output": "<block_quote classes=\"epigraph\">\n <paragraph>\n a\n <attribution>\n attribution" - }, - { - "name": "highlights", - "class": "docutils.parsers.rst.directives.body.Highlights", - "args": [], - "options": {}, - "content": "a\n\n-- attribution", - "output": "<block_quote classes=\"highlights\">\n <paragraph>\n a\n <attribution>\n attribution" - }, - { - "name": "pull-quote", - "content": "a\n\n-- attribution", - "class": "docutils.parsers.rst.directives.body.PullQuote", - "args": [], - "options": {}, - "content": "a\n\n-- attribution", - "output": "<block_quote classes=\"pull-quote\">\n <paragraph>\n a\n <attribution>\n attribution" - }, - { - "name": "compound", - "class": "docutils.parsers.rst.directives.body.Compound", - "args": [], - "options": {}, - "content": "a", - "output": "<compound>\n <paragraph>\n a" - }, - { - "name": "container", - "class": "docutils.parsers.rst.directives.body.Container", - "args": [], - "options": {}, - "content": "a", - "output": "<container>\n <paragraph>\n a" - }, - { - "name": "image", - "class": "docutils.parsers.rst.directives.images.Image", - "args": [ - "path/to/image" - ], - "options": {}, - "output": "<image uri=\"path/to/image\">" - }, - { - "name": "cssclass", - "class": "docutils.parsers.rst.directives.misc.Class", - "args": [], - "options": {}, - "output": "" - }, - { - "name": "rst-class", - "class": "docutils.parsers.rst.directives.misc.Class", - "args": [], - "options": {}, - "output": "" - }, - { - "name": "raw", - "class": "docutils.parsers.rst.directives.misc.Raw", - "args": [ - "raw" - ], - "options": {}, - "content": "a", - "output": "<raw format=\"raw\" xml:space=\"preserve\">\n a" - }, - { - "name": "replace", - "class": "docutils.parsers.rst.directives.misc.Replace", - "args": [], - "options": {}, - "output": "", - "sub_only": true - }, - { - "name": "unicode", - "class": "docutils.parsers.rst.directives.misc.Unicode", - "args": [], - "options": {}, - "output": "", - "sub_only": true - }, - { - "name": "class", - "class": "docutils.parsers.rst.directives.misc.Class", - "args": [ - "myclass" - ], - "options": {}, - "content": "a", - "output": "<paragraph classes=\"myclass\">\n a" - }, - { - "name": "role", - "class": "docutils.parsers.rst.directives.misc.Role", - "args": [], - "content": "", - "options": {}, - "output": "" - }, - { - "name": "title", - "class": "docutils.parsers.rst.directives.misc.Title", - "args": [ - "title" - ], - "options": {}, - "output": "", - "doc_tag": "<document source=\"notset\" title=\"title\">" - }, - { - "name": "date", - "class": "docutils.parsers.rst.directives.misc.Date", - "args": [], - "options": {}, - "output": "", - "sub_only": true - }, - { - "name": "restructuredtext-test-directive", - "class": "docutils.parsers.rst.directives.misc.TestDirective", - "args": [], - "options": {}, - "output": "<system_message level=\"1\" line=\"1\" source=\"notset\" type=\"INFO\">\n <paragraph>\n Directive processed. Type=\"restructuredtext-test-directive\", arguments=[], options={}, content: None" - }, - { - "name": "contents", - "class": "docutils.parsers.rst.directives.parts.Contents", - "args": [], - "options": {}, - "output": "<topic classes=\"contents\" ids=\"contents\" names=\"contents\">\n <title>\n Contents\n <pending>\n .. internal attributes:\n .transform: docutils.transforms.parts.Contents\n .details:" - }, - { - "name": "sectnum", - "class": "docutils.parsers.rst.directives.parts.Sectnum", - "args": [], - "options": {}, - "output": "<pending>\n .. internal attributes:\n .transform: docutils.transforms.parts.SectNum\n .details:" - }, - { - "name": "header", - "class": "docutils.parsers.rst.directives.parts.Header", - "args": [], - "options": {}, - "content": "a", - "output": "<decoration>\n <header>\n <paragraph>\n a" - }, - { - "name": "footer", - "class": "docutils.parsers.rst.directives.parts.Footer", - "args": [], - "options": {}, - "content": "a", - "output": "<decoration>\n <footer>\n <paragraph>\n a" - }, - { - "name": "target-notes", - "class": "docutils.parsers.rst.directives.references.TargetNotes", - "args": [], - "options": {}, - "output": "<pending>\n .. internal attributes:\n .transform: docutils.transforms.references.TargetNotes\n .details:" - }, - { - "name": "default-role", - "class": "sphinx.directives.DefaultRole", - "args": [], - "options": {}, - "output": "" - }, - { - "name": "default-domain", - "class": "sphinx.directives.DefaultDomain", - "args": [ - "mydomain" - ], - "options": {}, - "output": "" - }, - { - "name": "describe", - "class": "sphinx.directives.ObjectDescription", - "args": [ - "something" - ], - "options": {}, - "output": "<index entries=\"\">\n<desc desctype=\"describe\" domain=\"\" noindex=\"False\" objtype=\"describe\">\n <desc_signature first=\"False\">\n <desc_name xml:space=\"preserve\">\n something\n <desc_content>" - }, - { - "name": "object", - "class": "sphinx.directives.ObjectDescription", - "args": [ - "something" - ], - "options": {}, - "output": "<index entries=\"\">\n<desc desctype=\"object\" domain=\"\" noindex=\"False\" objtype=\"object\">\n <desc_signature first=\"False\">\n <desc_name xml:space=\"preserve\">\n something\n <desc_content>" - }, - { - "name": "highlight", - "class": "sphinx.directives.code.Highlight", - "args": [ - "something" - ], - "options": {}, - "output": "<highlightlang force=\"False\" lang=\"something\" linenothreshold=\"9223372036854775807\">" - }, - { - "name": "code-block", - "class": "sphinx.directives.code.CodeBlock", - "args": [], - "options": {}, - "content": "a=1", - "output": "<literal_block force=\"False\" highlight_args=\"{}\" language=\"default\" xml:space=\"preserve\">\n a=1" - }, - { - "name": "sourcecode", - "class": "sphinx.directives.code.CodeBlock", - "args": [], - "options": {}, - "output": "<literal_block force=\"False\" highlight_args=\"{}\" language=\"default\" xml:space=\"preserve\">" - }, - { - "name": "literalinclude", - "class": "sphinx.directives.code.LiteralInclude", - "args": [ - "/path/to/file" - ], - "options": {}, - "output": "<system_message level=\"2\" line=\"0\" source=\"notset\" type=\"WARNING\">\n <paragraph>\n Include file '/srcdir/path/to/file' not found or reading it failed" - }, - { - "name": "toctree", - "class": "sphinx.directives.other.TocTree", - "args": [], - "options": {}, - "output": "<compound classes=\"toctree-wrapper\">\n <toctree caption=\"True\" entries=\"\" glob=\"False\" hidden=\"False\" includefiles=\"\" includehidden=\"False\" maxdepth=\"-1\" numbered=\"0\" parent=\"mock_docname\" titlesonly=\"False\">" - }, - { - "name": "sectionauthor", - "class": "sphinx.directives.other.Author", - "args": [ - "bob geldof" - ], - "options": {}, - "output": "" - }, - { - "name": "moduleauthor", - "class": "sphinx.directives.other.Author", - "args": [ - "ringo starr" - ], - "options": {}, - "output": "" - }, - { - "name": "codeauthor", - "class": "sphinx.directives.other.Author", - "args": [ - "paul mcartney" - ], - "options": {}, - "output": "" - }, - { - "name": "index", - "class": "sphinx.directives.other.Index", - "args": [ - "something" - ], - "options": {}, - "output": "<index entries=\"('single',\\ 'something',\\ 'index-0',\\ '',\\ None)\" inline=\"False\">\n<target ids=\"index-0\">" - }, - { - "name": "seealso", - "class": "sphinx.directives.other.SeeAlso", - "args": [], - "options": {}, - "content": "a", - "output": "<seealso>\n <paragraph>\n a" - }, - { - "name": "tabularcolumns", - "class": "sphinx.directives.other.TabularColumns", - "args": [ - "spec" - ], - "options": {}, - "output": "<tabular_col_spec spec=\"spec\">" - }, - { - "name": "centered", - "class": "sphinx.directives.other.Centered", - "args": [ - "text" - ], - "options": {}, - "output": "<centered>\n text" - }, - { - "name": "acks", - "class": "sphinx.directives.other.Acks", - "args": [], - "options": {}, - "content": "- name", - "output": "<acks>\n <bullet_list>\n <list_item>\n <paragraph>\n name" - }, - { - "name": "hlist", - "class": "sphinx.directives.other.HList", - "args": [], - "options": {}, - "content": "- item", - "output": "<hlist>\n <hlistcol>\n <bullet_list>\n <list_item>\n <paragraph>\n item\n <hlistcol>\n <bullet_list>" - }, - { - "name": "only", - "class": "sphinx.directives.other.Only", - "args": [ - "expr" - ], - "options": {}, - "output": "<only expr=\"expr\">" - }, - { - "name": "include", - "class": "sphinx.directives.other.Include", - "args": [ - "path/to/include" - ], - "options": {}, - "output": "" - }, - { - "name": "figure", - "class": "sphinx.directives.patches.Figure", - "args": [ - "path/to/figure" - ], - "options": {}, - "content": "caption\n\nlegend", - "output": "<figure>\n <image uri=\"path/to/figure\">\n <caption>\n caption\n <legend>\n <paragraph>\n legend" - }, - { - "name": "meta", - "class": "sphinx.directives.patches.Meta", - "args": [], - "options": {}, - "output": "" - }, - { - "name": "table", - "class": "sphinx.directives.patches.RSTTable", - "args": [], - "options": {}, - "content": "| a | b |\n|---|---|\n| 1 | 2 |", - "output": "<table classes=\"colwidths-auto\">\n <tgroup cols=\"2\">\n <colspec colwidth=\"50.0\">\n <colspec colwidth=\"50.0\">\n <thead>\n <row>\n <entry>\n a\n <entry>\n b\n <tbody>\n <row>\n <entry>\n 1\n <entry>\n 2" - }, - { - "name": "csv-table", - "class": "sphinx.directives.patches.CSVTable", - "args": [], - "options": {"header": "\"Treat\", \"Quantity\", \"Description\""}, - "content": "\"Albatross\", 2.99, \"On a stick!\"", - "output": "<table>\n <tgroup cols=\"3\">\n <colspec colwidth=\"33\">\n <colspec colwidth=\"33\">\n <colspec colwidth=\"33\">\n <tbody>\n <row>\n <entry>\n <paragraph>\n Albatross\n <entry>\n <paragraph>\n 2.99\n <entry>\n <paragraph>\n On a stick!" - }, - { - "name": "list-table", - "class": "sphinx.directives.patches.ListTable", - "args": [], - "options": {}, - "content": "* - item", - "output": "<table>\n <tgroup cols=\"1\">\n <colspec colwidth=\"100\">\n <tbody>\n <row>\n <entry>\n <paragraph>\n item" - }, - { - "name": "code", - "class": "sphinx.directives.patches.Code", - "args": [ - "python" - ], - "options": {}, - "content": "a", - "output": "<literal_block force=\"False\" highlight_args=\"{}\" language=\"python\" xml:space=\"preserve\">\n a" - }, - { - "name": "math", - "class": "sphinx.directives.patches.MathDirective", - "args": [], - "options": {}, - "output": "<math_block docname=\"mock_docname\" label=\"True\" nowrap=\"False\" number=\"True\" xml:space=\"preserve\">" - }, - { - "name": "deprecated", - "class": "sphinx.domains.changeset.VersionChange", - "args": [ - "0.3" - ], - "options": {}, - "output": "<versionmodified type=\"deprecated\" version=\"0.3\">\n <paragraph translatable=\"False\">\n <inline classes=\"versionmodified deprecated\">\n Deprecated since version 0.3." - }, - { - "name": "versionadded", - "class": "sphinx.domains.changeset.VersionChange", - "args": [ - "0.2" - ], - "options": {}, - "output": "<versionmodified type=\"versionadded\" version=\"0.2\">\n <paragraph translatable=\"False\">\n <inline classes=\"versionmodified added\">\n New in version 0.2." - }, - { - "name": "versionchanged", - "class": "sphinx.domains.changeset.VersionChange", - "args": [ - "0.1" - ], - "options": {}, - "output": "<versionmodified type=\"versionchanged\" version=\"0.1\">\n <paragraph translatable=\"False\">\n <inline classes=\"versionmodified changed\">\n Changed in version 0.1." - }, - { - "name": "glossary", - "class": "sphinx.domains.std.Glossary", - "args": [], - "options": {}, - "content": "term 1 : A\nterm 2 : B\n Definition of both terms.", - "output": "<glossary>\n <definition_list classes=\"glossary\">\n <definition_list_item>\n <term ids=\"term-term-1\">\n term 1\n <index entries=\"('single',\\ 'term\\ 1',\\ 'term-term-1',\\ 'main',\\ 'A')\">\n <term ids=\"term-term-2\">\n term 2\n <index entries=\"('single',\\ 'term\\ 2',\\ 'term-term-2',\\ 'main',\\ 'B')\">\n <definition>\n <paragraph>\n Definition of both terms." - }, - { - "name": "productionlist", - "class": "sphinx.domains.std.ProductionList", - "args": [ - "try_stmt: try1_stmt | try2_stmt" - ], - "options": {}, - "output": "<productionlist>\n <production ids=\"grammar-token-try-stmt\" tokenname=\"try_stmt\" xml:space=\"preserve\">\n try1_stmt | try2_stmt" - }, - { - "name": "cmdoption", - "class": "sphinx.domains.std.Cmdoption", - "args": ["a"], - "options": {}, - "output": "<index entries=\"('pair',\\ 'command\\ line\\ option;\\ a',\\ 'cmdoption-arg-a',\\ '',\\ None)\">\n<desc desctype=\"cmdoption\" domain=\"std\" noindex=\"False\" objtype=\"cmdoption\">\n <desc_signature allnames=\"a\" first=\"False\" ids=\"cmdoption-arg-a\" names=\"cmdoption-arg-a\">\n <desc_name xml:space=\"preserve\">\n a\n <desc_addname xml:space=\"preserve\">\n <desc_content>" - }, - { - "name": "rst:directive", - "class": "sphinx.domains.rst.ReSTDirective", - "args": [ - "a" - ], - "options": {}, - "output": "<index entries=\"('single',\\ 'a\\ (directive)',\\ 'directive-a',\\ '',\\ None)\">\n<desc desctype=\"directive\" domain=\"rst\" noindex=\"False\" objtype=\"directive\">\n <desc_signature first=\"False\" ids=\"directive-a\" names=\"directive-a\">\n <desc_name xml:space=\"preserve\">\n .. a::\n <desc_content>" - }, - { - "name": "rst:directive:option", - "class": "sphinx.domains.rst.ReSTDirectiveOption", - "args": [ - "a" - ], - "options": {}, - "output": "<index entries=\"('single',\\ ':a:\\ (directive\\ option)',\\ 'directive:option--a',\\ '',\\ 'A')\">\n<desc desctype=\"directive:option\" domain=\"rst\" noindex=\"False\" objtype=\"directive:option\">\n <desc_signature first=\"False\" ids=\"directive:option--a\" names=\"directive:option--a\">\n <desc_name xml:space=\"preserve\">\n :a:\n <desc_content>" - } -] diff --git a/tests/test_renderers/sphinx_roles.json b/tests/test_renderers/sphinx_roles.json deleted file mode 100644 index 8dd04d47..00000000 --- a/tests/test_renderers/sphinx_roles.json +++ /dev/null @@ -1,525 +0,0 @@ -[ - { - "name": "abbreviation", - "import": "docutils.parsers.rst.roles.GenericRole", - "type": "class", - "output": "<paragraph>\n <abbreviation>" - }, - { - "name": "acronym", - "import": "docutils.parsers.rst.roles.GenericRole", - "type": "class", - "output": "<paragraph>\n <acronym>" - }, - { - "name": "emphasis", - "import": "docutils.parsers.rst.roles.GenericRole", - "type": "class", - "output": "<paragraph>\n <emphasis>" - }, - { - "name": "literal", - "import": "docutils.parsers.rst.roles.GenericRole", - "type": "class", - "output": "<paragraph>\n <literal>" - }, - { - "name": "strong", - "import": "docutils.parsers.rst.roles.GenericRole", - "type": "class", - "output": "<paragraph>\n <strong>" - }, - { - "name": "subscript", - "import": "docutils.parsers.rst.roles.GenericRole", - "type": "class", - "output": "<paragraph>\n <subscript>" - }, - { - "name": "superscript", - "import": "docutils.parsers.rst.roles.GenericRole", - "type": "class", - "output": "<paragraph>\n <superscript>" - }, - { - "name": "title-reference", - "import": "docutils.parsers.rst.roles.GenericRole", - "type": "class", - "content": "t", - "output": "<paragraph>\n <title_reference>\n t" - }, - { - "name": "pep-reference", - "import": "docutils.parsers.rst.roles.pep_reference_role", - "type": "function", - "content": "0", - "output": "<paragraph>\n <reference refuri=\"http://www.python.org/dev/peps/pep-0000\">\n PEP 0" - }, - { - "name": "rfc-reference", - "import": "docutils.parsers.rst.roles.rfc_reference_role", - "type": "function", - "content": "1", - "output": "<paragraph>\n <reference refuri=\"http://tools.ietf.org/html/rfc1.html\">\n RFC 1" - }, - { - "name": "raw", - "import": "docutils.parsers.rst.roles.raw_role", - "type": "function", - "output": "" - }, - { - "name": "code", - "import": "docutils.parsers.rst.roles.code_role", - "type": "function", - "output": "<paragraph>\n <literal classes=\"code\">" - }, - { - "name": "math", - "import": "docutils.parsers.rst.roles.math_role", - "type": "function", - "output": "<paragraph>\n <math>" - }, - { - "name": "named-reference", - "import": "docutils.parsers.rst.roles.unimplemented_role", - "type": "function", - "output": "" - }, - { - "name": "anonymous-reference", - "import": "docutils.parsers.rst.roles.unimplemented_role", - "type": "function", - "output": "" - }, - { - "name": "uri-reference", - "import": "docutils.parsers.rst.roles.unimplemented_role", - "type": "function", - "output": "" - }, - { - "name": "footnote-reference", - "import": "docutils.parsers.rst.roles.unimplemented_role", - "type": "function", - "output": "" - }, - { - "name": "citation-reference", - "import": "docutils.parsers.rst.roles.unimplemented_role", - "type": "function", - "output": "" - }, - { - "name": "substitution-reference", - "import": "docutils.parsers.rst.roles.unimplemented_role", - "type": "function", - "output": "" - }, - { - "name": "target", - "import": "docutils.parsers.rst.roles.unimplemented_role", - "type": "function", - "output": "" - }, - { - "name": "restructuredtext-unimplemented-role", - "import": "docutils.parsers.rst.roles.unimplemented_role", - "type": "function", - "output": "" - }, - { - "name": "command", - "import": "docutils.parsers.rst.roles.CustomRole", - "type": "class", - "output": "" - }, - { - "name": "dfn", - "import": "docutils.parsers.rst.roles.CustomRole", - "type": "class", - "output": "" - }, - { - "name": "kbd", - "import": "docutils.parsers.rst.roles.CustomRole", - "type": "class", - "output": "" - }, - { - "name": "mailheader", - "import": "docutils.parsers.rst.roles.CustomRole", - "type": "class", - "output": "" - }, - { - "name": "makevar", - "import": "docutils.parsers.rst.roles.CustomRole", - "type": "class", - "output": "" - }, - { - "name": "manpage", - "import": "docutils.parsers.rst.roles.CustomRole", - "type": "class", - "output": "" - }, - { - "name": "mimetype", - "import": "docutils.parsers.rst.roles.CustomRole", - "type": "class", - "output": "" - }, - { - "name": "newsgroup", - "import": "docutils.parsers.rst.roles.CustomRole", - "type": "class", - "output": "" - }, - { - "name": "regexp", - "import": "docutils.parsers.rst.roles.CustomRole", - "type": "class", - "output": "" - }, - { - "name": "c:func", - "import": "sphinx.domains.c.CXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"c\" refexplicit=\"False\" reftarget=\"a\" reftype=\"func\" refwarn=\"False\">\n <literal classes=\"xref c c-func\">\n a()" - }, - { - "name": "c:member", - "import": "sphinx.domains.c.CObject", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"c\" refexplicit=\"False\" reftarget=\"a\" reftype=\"member\" refwarn=\"False\">\n <literal classes=\"xref c c-member\">\n a" - }, - { - "name": "c:macro", - "import": "sphinx.domains.c.CObject", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"c\" refexplicit=\"False\" reftarget=\"a\" reftype=\"macro\" refwarn=\"False\">\n <literal classes=\"xref c c-macro\">\n a" - }, - { - "name": "c:data", - "import": "sphinx.domains.c.CXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"c\" refexplicit=\"False\" reftarget=\"a\" reftype=\"data\" refwarn=\"False\">\n <literal classes=\"xref c c-data\">\n a" - }, - { - "name": "c:type", - "import": "sphinx.domains.c.CObject", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"c\" refexplicit=\"False\" reftarget=\"a\" reftype=\"type\" refwarn=\"False\">\n <literal classes=\"xref c c-type\">\n a" - }, - { - "name": "cpp:any", - "import": "sphinx.domains.cpp.CPPXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"cpp\" refexplicit=\"False\" reftarget=\"a\" reftype=\"any\" refwarn=\"False\">\n <literal classes=\"xref cpp cpp-any\">\n a" - }, - { - "name": "cpp:class", - "import": "sphinx.domains.cpp.CPPClassObject", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"cpp\" refexplicit=\"False\" reftarget=\"a\" reftype=\"class\" refwarn=\"False\">\n <literal classes=\"xref cpp cpp-class\">\n a" - }, - { - "name": "cpp:struct", - "import": "sphinx.domains.cpp.CPPClassObject", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"cpp\" refexplicit=\"False\" reftarget=\"a\" reftype=\"struct\" refwarn=\"False\">\n <literal classes=\"xref cpp cpp-struct\">\n a" - }, - { - "name": "cpp:union", - "import": "sphinx.domains.cpp.CPPUnionObject", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"cpp\" refexplicit=\"False\" reftarget=\"a\" reftype=\"union\" refwarn=\"False\">\n <literal classes=\"xref cpp cpp-union\">\n a" - }, - { - "name": "cpp:func", - "import": "sphinx.domains.cpp.CPPXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"cpp\" refexplicit=\"False\" reftarget=\"a\" reftype=\"func\" refwarn=\"False\">\n <literal classes=\"xref cpp cpp-func\">\n a()" - }, - { - "name": "cpp:member", - "import": "sphinx.domains.cpp.CPPMemberObject", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"cpp\" refexplicit=\"False\" reftarget=\"a\" reftype=\"member\" refwarn=\"False\">\n <literal classes=\"xref cpp cpp-member\">\n a" - }, - { - "name": "cpp:var", - "import": "sphinx.domains.cpp.CPPMemberObject", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"cpp\" refexplicit=\"False\" reftarget=\"a\" reftype=\"var\" refwarn=\"False\">\n <literal classes=\"xref cpp cpp-var\">\n a" - }, - { - "name": "cpp:type", - "import": "sphinx.domains.cpp.CPPTypeObject", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"cpp\" refexplicit=\"False\" reftarget=\"a\" reftype=\"type\" refwarn=\"False\">\n <literal classes=\"xref cpp cpp-type\">\n a" - }, - { - "name": "cpp:concept", - "import": "sphinx.domains.cpp.CPPConceptObject", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"cpp\" refexplicit=\"False\" reftarget=\"a\" reftype=\"concept\" refwarn=\"False\">\n <literal classes=\"xref cpp cpp-concept\">\n a" - }, - { - "name": "cpp:enum", - "import": "sphinx.domains.cpp.CPPEnumObject", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"cpp\" refexplicit=\"False\" reftarget=\"a\" reftype=\"enum\" refwarn=\"False\">\n <literal classes=\"xref cpp cpp-enum\">\n a" - }, - { - "name": "cpp:enumerator", - "import": "sphinx.domains.cpp.CPPEnumeratorObject", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"cpp\" refexplicit=\"False\" reftarget=\"a\" reftype=\"enumerator\" refwarn=\"False\">\n <literal classes=\"xref cpp cpp-enumerator\">\n a" - }, - { - "name": "cpp:expr", - "import": "sphinx.domains.cpp.CPPExprRole", - "type": "class", - "output": "<paragraph>\n <literal classes=\"xref cpp cpp-expr\">\n <pending_xref classname=\"True\" cpp:parent_key=\"\" modname=\"True\" refdomain=\"cpp\" reftarget=\"a\" reftype=\"identifier\">\n a" - }, - { - "name": "cpp:texpr", - "import": "sphinx.domains.cpp.CPPExprRole", - "type": "class", - "output": "<paragraph>\n <inline classes=\"xref cpp cpp-texpr\">\n <pending_xref classname=\"True\" cpp:parent_key=\"\" modname=\"True\" refdomain=\"cpp\" reftarget=\"a\" reftype=\"identifier\">\n a" - }, - { - "name": "js:func", - "import": "sphinx.domains.javascript.JSXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref js:module=\"True\" js:object=\"True\" refdoc=\"mock_docname\" refdomain=\"js\" refexplicit=\"False\" reftarget=\"a\" reftype=\"func\" refwarn=\"False\">\n <literal classes=\"xref js js-func\">\n a()" - }, - { - "name": "js:meth", - "import": "sphinx.domains.javascript.JSXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref js:module=\"True\" js:object=\"True\" refdoc=\"mock_docname\" refdomain=\"js\" refexplicit=\"False\" reftarget=\"a\" reftype=\"meth\" refwarn=\"False\">\n <literal classes=\"xref js js-meth\">\n a()" - }, - { - "name": "js:class", - "import": "sphinx.domains.javascript.JSConstructor", - "type": "function", - "output": "<paragraph>\n <pending_xref js:module=\"True\" js:object=\"True\" refdoc=\"mock_docname\" refdomain=\"js\" refexplicit=\"False\" reftarget=\"a\" reftype=\"class\" refwarn=\"False\">\n <literal classes=\"xref js js-class\">\n a()" - }, - { - "name": "js:data", - "import": "sphinx.domains.javascript.JSObject", - "type": "function", - "output": "<paragraph>\n <pending_xref js:module=\"True\" js:object=\"True\" refdoc=\"mock_docname\" refdomain=\"js\" refexplicit=\"False\" reftarget=\"a\" reftype=\"data\" refwarn=\"False\">\n <literal classes=\"xref js js-data\">\n a" - }, - { - "name": "js:attr", - "import": "sphinx.domains.javascript.JSXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref js:module=\"True\" js:object=\"True\" refdoc=\"mock_docname\" refdomain=\"js\" refexplicit=\"False\" reftarget=\"a\" reftype=\"attr\" refwarn=\"False\">\n <literal classes=\"xref js js-attr\">\n a" - }, - { - "name": "js:mod", - "import": "sphinx.domains.javascript.JSXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref js:module=\"True\" js:object=\"True\" refdoc=\"mock_docname\" refdomain=\"js\" refexplicit=\"False\" reftarget=\"a\" reftype=\"mod\" refwarn=\"False\">\n <literal classes=\"xref js js-mod\">\n a" - }, - { - "name": "eq", - "import": "sphinx.domains.math.MathReferenceRole", - "type": "class", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"math\" refexplicit=\"False\" reftarget=\"a\" reftype=\"eq\" refwarn=\"True\">\n <literal classes=\"xref eq\">\n a" - }, - { - "name": "math:numref", - "import": "sphinx.domains.math.MathReferenceRole", - "type": "class", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"math\" refexplicit=\"False\" reftarget=\"a\" reftype=\"numref\" refwarn=\"False\">\n <literal classes=\"xref math math-numref\">\n a" - }, - { - "name": "py:data", - "import": "sphinx.domains.python.PyVariable", - "type": "function", - "output": "<paragraph>\n <pending_xref py:class=\"True\" py:module=\"True\" refdoc=\"mock_docname\" refdomain=\"py\" refexplicit=\"False\" reftarget=\"a\" reftype=\"data\" refwarn=\"False\">\n <literal classes=\"xref py py-data\">\n a" - }, - { - "name": "py:exc", - "import": "sphinx.domains.python.PyXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref py:class=\"True\" py:module=\"True\" refdoc=\"mock_docname\" refdomain=\"py\" refexplicit=\"False\" reftarget=\"a\" reftype=\"exc\" refwarn=\"False\">\n <literal classes=\"xref py py-exc\">\n a" - }, - { - "name": "py:func", - "import": "sphinx.domains.python.PyXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref py:class=\"True\" py:module=\"True\" refdoc=\"mock_docname\" refdomain=\"py\" refexplicit=\"False\" reftarget=\"a\" reftype=\"func\" refwarn=\"False\">\n <literal classes=\"xref py py-func\">\n a()" - }, - { - "name": "py:class", - "import": "sphinx.domains.python.PyClasslike", - "type": "function", - "output": "<paragraph>\n <pending_xref py:class=\"True\" py:module=\"True\" refdoc=\"mock_docname\" refdomain=\"py\" refexplicit=\"False\" reftarget=\"a\" reftype=\"class\" refwarn=\"False\">\n <literal classes=\"xref py py-class\">\n a" - }, - { - "name": "py:const", - "import": "sphinx.domains.python.PyXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref py:class=\"True\" py:module=\"True\" refdoc=\"mock_docname\" refdomain=\"py\" refexplicit=\"False\" reftarget=\"a\" reftype=\"const\" refwarn=\"False\">\n <literal classes=\"xref py py-const\">\n a" - }, - { - "name": "py:attr", - "import": "sphinx.domains.python.PyXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref py:class=\"True\" py:module=\"True\" refdoc=\"mock_docname\" refdomain=\"py\" refexplicit=\"False\" reftarget=\"a\" reftype=\"attr\" refwarn=\"False\">\n <literal classes=\"xref py py-attr\">\n a" - }, - { - "name": "py:meth", - "import": "sphinx.domains.python.PyXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref py:class=\"True\" py:module=\"True\" refdoc=\"mock_docname\" refdomain=\"py\" refexplicit=\"False\" reftarget=\"a\" reftype=\"meth\" refwarn=\"False\">\n <literal classes=\"xref py py-meth\">\n a()" - }, - { - "name": "py:mod", - "import": "sphinx.domains.python.PyXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref py:class=\"True\" py:module=\"True\" refdoc=\"mock_docname\" refdomain=\"py\" refexplicit=\"False\" reftarget=\"a\" reftype=\"mod\" refwarn=\"False\">\n <literal classes=\"xref py py-mod\">\n a" - }, - { - "name": "py:obj", - "import": "sphinx.domains.python.PyXRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref py:class=\"True\" py:module=\"True\" refdoc=\"mock_docname\" refdomain=\"py\" refexplicit=\"False\" reftarget=\"a\" reftype=\"obj\" refwarn=\"False\">\n <literal classes=\"xref py py-obj\">\n a" - }, - { - "name": "rst:role", - "import": "sphinx.domains.rst.ReSTRole", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"rst\" refexplicit=\"False\" reftarget=\"a\" reftype=\"role\" refwarn=\"False\">\n <literal classes=\"xref rst rst-role\">\n a" - }, - { - "name": "program", - "import": "sphinx.domains.std.Program", - "type": "function", - "output": "<paragraph>\n <literal_strong classes=\"program\">\n a" - }, - { - "name": "option", - "import": "sphinx.domains.std.Cmdoption", - "type": "function", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"std\" refexplicit=\"False\" reftarget=\"a\" reftype=\"option\" refwarn=\"True\" std:program=\"True\">\n <literal classes=\"xref std std-option\">\n a" - }, - { - "name": "envvar", - "import": "sphinx.domains.std.EnvVarXRefRole", - "type": "function", - "output": "<paragraph>\n <index entries=\"('single',\\ 'a',\\ 'index-0',\\ '',\\ None) ('single',\\ 'environment\\ variable;\\ a',\\ 'index-0',\\ '',\\ None)\">\n <target ids=\"index-0\">\n <pending_xref refdoc=\"mock_docname\" refdomain=\"std\" refexplicit=\"False\" reftarget=\"a\" reftype=\"envvar\" refwarn=\"False\">\n <literal classes=\"xref std std-envvar\">\n a" - }, - { - "name": "index", - "import": "sphinx.roles.Index", - "type": "class", - "output": "<paragraph>\n <index entries=\"('single',\\ 'a',\\ 'index-0',\\ '',\\ None)\">\n <target ids=\"index-0\">\n a" - }, - { - "name": "download", - "import": "sphinx.roles.XRefRole", - "type": "class", - "output": "<paragraph>\n <download_reference refdoc=\"mock_docname\" refdomain=\"\" refexplicit=\"False\" reftarget=\"a\" reftype=\"download\" refwarn=\"False\">\n <literal classes=\"xref download\">\n a" - }, - { - "name": "any", - "import": "sphinx.roles.AnyXRefRole", - "type": "class", - "content": "a <alt text>", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"\" refexplicit=\"True\" reftarget=\"alt text\" reftype=\"any\" refwarn=\"True\">\n <literal classes=\"xref any\">\n a" - }, - { - "name": "pep", - "import": "sphinx.roles.PEP", - "type": "class", - "content": "1", - "output": "<paragraph>\n <index entries=\"('single',\\ 'Python\\ Enhancement\\ Proposals;\\ PEP\\ 1',\\ 'index-0',\\ '',\\ None)\">\n <target ids=\"index-0\">\n <reference classes=\"pep\" internal=\"False\" refuri=\"http://www.python.org/dev/peps/pep-0001\">\n <strong>\n PEP 1" - }, - { - "name": "rfc", - "import": "sphinx.roles.RFC", - "type": "class", - "content": "1", - "output": "<paragraph>\n <index entries=\"('single',\\ 'RFC;\\ RFC\\ 1',\\ 'index-0',\\ '',\\ None)\">\n <target ids=\"index-0\">\n <reference classes=\"rfc\" internal=\"False\" refuri=\"http://tools.ietf.org/html/rfc1.html\">\n <strong>\n RFC 1" - }, - { - "name": "guilabel", - "import": "sphinx.roles.GUILabel", - "type": "class", - "output": "<paragraph>\n <inline classes=\"guilabel\" rawtext=\":guilabel:`a`\">\n a" - }, - { - "name": "menuselection", - "import": "sphinx.roles.MenuSelection", - "type": "class", - "output": "<paragraph>\n <inline classes=\"menuselection\" rawtext=\":menuselection:`a`\">\n a" - }, - { - "name": "file", - "import": "sphinx.roles.EmphasizedLiteral", - "type": "class", - "output": "<paragraph>\n <literal classes=\"file\" role=\"file\">\n a" - }, - { - "name": "samp", - "import": "sphinx.roles.EmphasizedLiteral", - "type": "class", - "output": "<paragraph>\n <literal classes=\"samp\" role=\"samp\">\n a" - }, - { - "name": "abbr", - "import": "sphinx.roles.Abbreviation", - "type": "class", - "output": "<paragraph>\n <abbreviation>\n a" - }, - { - "name": "rst:dir", - "import": "sphinx.roles.XRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"rst\" refexplicit=\"False\" reftarget=\"a\" reftype=\"dir\" refwarn=\"False\">\n <literal classes=\"xref rst rst-dir\">\n a" - }, - { - "name": "token", - "import": "sphinx.roles.XRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"std\" refexplicit=\"False\" reftarget=\"a\" reftype=\"token\" refwarn=\"False\">\n <literal classes=\"xref std std-token\">\n a" - }, - { - "name": "term", - "import": "sphinx.roles.XRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"std\" refexplicit=\"False\" reftarget=\"a\" reftype=\"term\" refwarn=\"True\">\n <inline classes=\"xref std std-term\">\n a" - }, - { - "name": "ref", - "import": "sphinx.roles.XRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"std\" refexplicit=\"False\" reftarget=\"a\" reftype=\"ref\" refwarn=\"True\">\n <inline classes=\"xref std std-ref\">\n a" - }, - { - "name": "numref", - "import": "sphinx.roles.XRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"std\" refexplicit=\"False\" reftarget=\"a\" reftype=\"numref\" refwarn=\"True\">\n <literal classes=\"xref std std-numref\">\n a" - }, - { - "name": "keyword", - "import": "sphinx.roles.XRefRole", - "type": "class", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"std\" refexplicit=\"False\" reftarget=\"a\" reftype=\"keyword\" refwarn=\"True\">\n <literal classes=\"xref std std-keyword\">\n a" - }, - { - "name": "doc", - "import": "sphinx.roles.XRefRole", - "type": "class", - "content": "this lecture <heavy_tails>", - "output": "<paragraph>\n <pending_xref refdoc=\"mock_docname\" refdomain=\"std\" refexplicit=\"True\" reftarget=\"heavy_tails\" reftype=\"doc\" refwarn=\"True\">\n <inline classes=\"xref std std-doc\">\n this lecture" - } -] diff --git a/tests/test_renderers/test_docutils.py b/tests/test_renderers/test_docutils.py deleted file mode 100644 index b7ad1ec2..00000000 --- a/tests/test_renderers/test_docutils.py +++ /dev/null @@ -1,476 +0,0 @@ -from textwrap import dedent -from unittest import mock - -from mistletoe.block_tokenizer import tokenize_main -from mistletoe.span_tokenizer import tokenize_span - -from myst_parser import text_to_tokens, render_tokens, parse_text -from myst_parser.block_tokens import Document -from myst_parser.docutils_renderer import SphinxRenderer - - -def render_token( - renderer_mock, token_name, children=True, without_attrs=None, **kwargs -): - render_func = renderer_mock.render_map[token_name] - children = mock.MagicMock(spec=list) if children else None - if "position" not in kwargs: - kwargs["position"] = (0, 0) - mock_token = mock.Mock(children=children, **kwargs) - without_attrs = without_attrs or [] - for attr in without_attrs: - delattr(mock_token, attr) - render_func(mock_token) - - -def test_text_to_tokens(): - root = text_to_tokens("abc") - document = render_tokens( - root, - SphinxRenderer, - load_sphinx_env=True, - sphinx_conf={"project": "MyST Parser"}, - ) - assert document.pformat() == ( - '<document source="notset">\n <paragraph>\n abc\n' - ) - - -def test_parse_text(): - document = parse_text( - "abc", "sphinx", load_sphinx_env=True, sphinx_conf={"project": "MyST Parser"} - ) - assert document.pformat() == ( - '<document source="notset">\n <paragraph>\n abc\n' - ) - - -def test_strong(renderer_mock): - render_token(renderer_mock, "Strong") - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <strong> - """ - ) - - -def test_emphasis(renderer_mock): - render_token(renderer_mock, "Emphasis") - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <emphasis> - """ - ) - - -def test_raw_text(renderer_mock): - render_token(renderer_mock, "RawText", children=False, content="john & jane") - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - john & jane - """ - ) - - -def test_inline_code(renderer_mock): - renderer_mock.render(tokenize_span("`foo`")[0]) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <literal> - foo - """ - ) - - -def test_paragraph(renderer_mock): - render_token(renderer_mock, "Paragraph", position=(0, 1)) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <paragraph> - """ - ) - - -def test_heading(renderer_mock): - render_token(renderer_mock, "Heading", level=1, position=(0, 0)) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <section ids="id1" names=""> - <title> - """ - ) - - -def test_block_code(renderer_mock): - - renderer_mock.render(tokenize_main(["```sh\n", "foo\n", "```\n"])[0]) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <literal_block language="sh" xml:space="preserve"> - foo - """ - ) - - -def test_block_code_no_language(renderer_mock): - - renderer_mock.render(tokenize_main(["```\n", "foo\n", "```\n"])[0]) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <literal_block language="" xml:space="preserve"> - foo - """ - ) - - -def test_image(renderer_mock): - render_token(renderer_mock, "Image", src="src", title="title") - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <image alt="" uri="src"> - """ - ) - - -def test_image_with_alt(renderer_mock): - renderer_mock.render(tokenize_main([r"![alt](path/to/image.jpeg)"])[0]) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <paragraph> - <image alt="alt" uri="path/to/image.jpeg"> - """ - ) - - -def test_quote(renderer_mock): - render_token(renderer_mock, "Quote", position=(0, 0)) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <block_quote> - """ - ) - - -def test_bullet_list(renderer_mock): - render_token(renderer_mock, "List", start_at=None) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <bullet_list> - """ - ) - - -def test_enumerated_list(renderer_mock): - render_token(renderer_mock, "List", start_at=1) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <enumerated_list> - """ - ) - - -def test_list_item(renderer_mock): - render_token(renderer_mock, "ListItem") - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <list_item> - """ - ) - - -def test_math(renderer_mock): - render_token(renderer_mock, "Math", content="$a$") - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <math> - a - """ - ) - - -def test_math_block(renderer_mock): - render_token(renderer_mock, "Math", content="$$a$$") - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <math_block nowrap="False" number="True" xml:space="preserve"> - a - """ - ) - - -def test_role_code(renderer_mock): - renderer_mock.render(tokenize_span("{code}`` a=1{`} ``")[0]) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <literal classes="code"> - a=1{`} - """ - ) - - -def test_target_block(renderer_mock): - renderer_mock.render(tokenize_main(["(target)="])[0]) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <target ids="target" names="target"> - """ - ) - - -def test_target_inline(renderer_mock): - renderer_mock.render(tokenize_main(["A b(target)="])[0]) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <paragraph> - A b - <target ids="target" names="target"> - """ - ) - - -def test_cross_referencing(sphinx_renderer, file_regression): - string = dedent( - """\ - (target)= - - Title - ----- - - [alt1](target) - - [](target2) - - [alt2](https://www.google.com) - - [alt3](#target3) - """ - ) - sphinx_renderer.render(Document.read(string)) - file_regression.check(sphinx_renderer.document.pformat(), extension=".xml") - - -def test_comment(renderer_mock): - renderer_mock.render(Document.read(["line 1", r"% a comment", "line 2"])) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <paragraph> - line 1 - <comment xml:space="preserve"> - a comment - <paragraph> - line 2 - """ - ) - - -def test_block_break(renderer_mock): - renderer_mock.render(tokenize_main(["+++ string"])[0]) - assert renderer_mock.document.pformat() == dedent( - """\ - <document source="notset"> - <comment classes="block_break" xml:space="preserve"> - string - """ - ) - - -def test_link_reference(renderer): - renderer.render( - Document.read( - ["[name][key]", "", '[key]: https://www.google.com "a title"', ""] - ) - ) - assert renderer.document.pformat() == dedent( - """\ - <document source="notset"> - <paragraph> - <reference refuri="https://www.google.com" title="a title"> - name - """ - ) - - -def test_link_reference_no_key(renderer): - renderer.render( - Document.read(["[name]", "", '[name]: https://www.google.com "a title"', ""]) - ) - assert renderer.document.pformat() == dedent( - """\ - <document source="notset"> - <paragraph> - <reference refuri="https://www.google.com" title="a title"> - name - """ - ) - - -def test_block_quotes(renderer): - renderer.render( - Document.read( - dedent( - """\ - ```{epigraph} - a b*c* - - -- a**b** - """ - ) - ) - ) - assert renderer.document.pformat() == dedent( - """\ - <document source="notset"> - <block_quote classes="epigraph"> - <paragraph> - a b - <emphasis> - c - <attribution> - a - <strong> - b - """ - ) - - -def test_link_def_in_directive(renderer): - renderer.render( - Document.read( - dedent( - """\ - ```{note} - [a] - ``` - - [a]: link - """ - ) - ) - ) - assert renderer.document.pformat() == dedent( - """\ - <document source="notset"> - <note> - <paragraph> - <pending_xref refdomain="True" refexplicit="True" reftarget="link" reftype="any" refwarn="True"> - <literal classes="xref any"> - a - """ # noqa: E501 - ) - - -def test_link_def_in_directive_nested(renderer, file_regression): - # TODO document or 'fix' the fact that [ref2] here isn't resolved - renderer.render( - Document.read( - dedent( - """\ - ```{note} - [ref1]: link - ``` - - ```{note} - [ref1] - [ref2] - ``` - - ```{note} - [ref2]: link - ``` - """ - ) - ) - ) - file_regression.check(renderer.document.pformat(), extension=".xml") - - -def test_footnotes(renderer): - renderer.render( - Document.read( - dedent( - """\ - [^a] - - [^a]: footnote*text* - """ - ) - ) - ) - print(renderer.document.pformat()) - assert renderer.document.pformat() == dedent( - """\ - <document source="notset"> - <paragraph> - <footnote_reference auto="1" ids="id1" refname="a"> - <transition> - <footnote auto="1" ids="a" names="a"> - <paragraph> - footnote - <emphasis> - text - """ - ) - - -def test_full_run(sphinx_renderer, file_regression): - string = dedent( - """\ - --- - a: 1 - --- - - (target)= - # header 1 - ## sub header 1 - - a *b* **c** `abc` \\* - - ## sub header 2 - - x y [a](http://www.xyz.com) z - - --- - - # header 2 - - ```::python {a=1} - a = 1 - ``` - - > abc - - - a - - b - - c - - 1. a - 2. b - 1. c - - {ref}`target` - - """ - ) - - sphinx_renderer.render(Document.read(string)) - file_regression.check(sphinx_renderer.document.pformat(), extension=".xml") diff --git a/tests/test_renderers/test_docutils/test_cross_referencing.xml b/tests/test_renderers/test_docutils/test_cross_referencing.xml deleted file mode 100644 index 8e0502ac..00000000 --- a/tests/test_renderers/test_docutils/test_cross_referencing.xml +++ /dev/null @@ -1,18 +0,0 @@ -<document source="notset"> - <target ids="target" names="target"> - <section ids="title" names="title"> - <title> - Title - <paragraph> - <pending_xref refdomain="True" refexplicit="True" reftarget="target" reftype="any" refwarn="True"> - <literal classes="xref any"> - alt1 - <paragraph> - <pending_xref refdomain="True" refexplicit="False" reftarget="target2" reftype="any" refwarn="True"> - <literal classes="xref any"> - <paragraph> - <reference refuri="https://www.google.com"> - alt2 - <paragraph> - <reference refuri="#target3"> - alt3 diff --git a/tests/test_renderers/test_docutils/test_full_run.xml b/tests/test_renderers/test_docutils/test_full_run.xml deleted file mode 100644 index feabe2c9..00000000 --- a/tests/test_renderers/test_docutils/test_full_run.xml +++ /dev/null @@ -1,69 +0,0 @@ -<document source="notset"> - <docinfo> - <field> - <field_name> - a - <field_body> - 1 - <target ids="target" names="target"> - <section ids="header-1" names="header\ 1"> - <title> - header 1 - <section ids="sub-header-1" names="sub\ header\ 1"> - <title> - sub header 1 - <paragraph> - a - <emphasis> - b - - <strong> - c - - <literal> - abc - - * - <section ids="sub-header-2" names="sub\ header\ 2"> - <title> - sub header 2 - <paragraph> - x y - <reference refuri="http://www.xyz.com"> - a - z - <transition> - <section ids="header-2" names="header\ 2"> - <title> - header 2 - <literal_block language="::python" xml:space="preserve"> - a = 1 - <block_quote> - <paragraph> - abc - <bullet_list> - <list_item> - <paragraph> - a - <list_item> - <paragraph> - b - <bullet_list> - <list_item> - <paragraph> - c - <enumerated_list> - <list_item> - <paragraph> - a - <list_item> - <paragraph> - b - <enumerated_list> - <list_item> - <paragraph> - c - <paragraph> - <pending_xref refdoc="mock_docname" refdomain="std" refexplicit="False" reftarget="target" reftype="ref" refwarn="True"> - <inline classes="xref std std-ref"> - target diff --git a/tests/test_renderers/test_docutils/test_link_def_in_directive_nested.xml b/tests/test_renderers/test_docutils/test_link_def_in_directive_nested.xml deleted file mode 100644 index 3864506f..00000000 --- a/tests/test_renderers/test_docutils/test_link_def_in_directive_nested.xml +++ /dev/null @@ -1,10 +0,0 @@ -<document source="notset"> - <note> - <note> - <paragraph> - <pending_xref refdomain="True" refexplicit="True" reftarget="link" reftype="any" refwarn="True"> - <literal classes="xref any"> - ref1 - - [ref2] - <note> diff --git a/tests/test_renderers/test_fixtures.py b/tests/test_renderers/test_fixtures.py new file mode 100644 index 00000000..9b0ae818 --- /dev/null +++ b/tests/test_renderers/test_fixtures.py @@ -0,0 +1,89 @@ +from pathlib import Path + +import pytest + +from markdown_it.utils import read_fixture_file +from myst_parser.main import to_docutils + +FIXTURE_PATH = Path(__file__).parent.joinpath("fixtures") + + +@pytest.mark.parametrize( + "line,title,input,expected", read_fixture_file(FIXTURE_PATH.joinpath("basic.md")) +) +def test_basic(line, title, input, expected): + document = to_docutils(input) + print(document.pformat()) + assert "\n".join( + [l.rstrip() for l in document.pformat().splitlines()] + ) == "\n".join([l.rstrip() for l in expected.splitlines()]) + + +@pytest.mark.parametrize( + "line,title,input,expected", + read_fixture_file(FIXTURE_PATH.joinpath("role_options.md")), +) +def test_role_options(line, title, input, expected): + document = to_docutils(input) + print(document.pformat()) + assert "\n".join( + [l.rstrip() for l in document.pformat().splitlines()] + ) == "\n".join([l.rstrip() for l in expected.splitlines()]) + + +@pytest.mark.parametrize( + "line,title,input,expected", + read_fixture_file(FIXTURE_PATH.joinpath("docutil_roles.md")), +) +def test_docutils_roles(line, title, input, expected): + document = to_docutils(input) + print(document.pformat()) + assert "\n".join( + [l.rstrip() for l in document.pformat().splitlines()] + ) == "\n".join([l.rstrip() for l in expected.splitlines()]) + + +@pytest.mark.parametrize( + "line,title,input,expected", + read_fixture_file(FIXTURE_PATH.joinpath("docutil_directives.md")), +) +def test_docutils_directives(line, title, input, expected): + # TODO fix skipped directives + # TODO test domain directives + if title.startswith("SKIP"): + pytest.skip(title) + document = to_docutils(input) + print(document.pformat()) + assert "\n".join( + [l.rstrip() for l in document.pformat().splitlines()] + ) == "\n".join([l.rstrip() for l in expected.splitlines()]) + + +@pytest.mark.parametrize( + "line,title,input,expected", + read_fixture_file(FIXTURE_PATH.joinpath("sphinx_directives.md")), +) +def test_sphinx_directives(line, title, input, expected): + # TODO fix skipped directives + # TODO test domain directives + if title.startswith("SKIP"): + pytest.skip(title) + document = to_docutils(input, in_sphinx_env=True) + print(document.pformat()) + assert "\n".join( + [l.rstrip() for l in document.pformat().splitlines()] + ) == "\n".join([l.rstrip() for l in expected.splitlines()]) + + +@pytest.mark.parametrize( + "line,title,input,expected", + read_fixture_file(FIXTURE_PATH.joinpath("sphinx_roles.md")), +) +def test_sphinx_roles(line, title, input, expected): + if title.startswith("SKIP"): + pytest.skip(title) + document = to_docutils(input, in_sphinx_env=True) + print(document.pformat()) + assert "\n".join( + [l.rstrip() for l in document.pformat().splitlines()] + ) == "\n".join([l.rstrip() for l in expected.splitlines()]) diff --git a/tests/test_renderers/test_html.py b/tests/test_renderers/test_html.py deleted file mode 100644 index 787b7fdd..00000000 --- a/tests/test_renderers/test_html.py +++ /dev/null @@ -1,128 +0,0 @@ -from textwrap import dedent - -import pytest - -from myst_parser import text_to_tokens, render_tokens, parse_text -from mistletoe.block_tokenizer import tokenize_main - -from myst_parser.html_renderer import HTMLRenderer - - -@pytest.fixture -def renderer(): - renderer = HTMLRenderer() - with renderer: - yield renderer - - -def test_render_tokens(): - root = text_to_tokens("abc") - assert render_tokens(root, HTMLRenderer) == "<p>abc</p>\n" - - -def test_math(renderer): - output = renderer.render(tokenize_main(["$a=1$"])[0]) - assert output == dedent("<p>$$a=1$$</p>") - - -def test_role(renderer): - output = renderer.render(tokenize_main(["{name}`content`"])[0]) - assert output == ( - '<p><span class="myst-role"><code>{name}content</code></span></p>' - ) - - -def test_directive(renderer): - output = renderer.render(tokenize_main(["```{name} arg\n", "foo\n", "```\n"])[0]) - assert output == dedent( - """\ - <div class="myst-directive"> - <pre><code>{name} arg - foo - </code></pre></span> - </div>""" - ) - - -def test_block_break(renderer): - output = renderer.render(text_to_tokens("+++ abc")) - assert output.splitlines() == [ - "<!-- myst-block-data abc -->", - '<hr class="myst-block-break" />', - ] - - -def test_line_comment(renderer): - output = renderer.render(tokenize_main([r"% abc"])[0]) - assert output == "<!-- abc -->" - - -def test_target(): - output = parse_text("(a)=", "html") - assert output == ( - '<p><a class="myst-target" href="#a" title="Permalink to here">(a)=</a></p>\n' - ) - - -def test_front_matter(renderer): - output = renderer.render(text_to_tokens("---\na: 1\nb: 2\nc: 3\n---")) - assert output.splitlines() == [ - '<div class="myst-front-matter"><pre><code class="language-yaml">a: 1', - "b: 2", - "c: 3", - "</code></pre></div>", - ] - - -def test_minimal_html_page(file_regression): - in_string = dedent( - """\ - --- - a: 1 - --- - (title-target)= - # title - - Abc $a=1$ {role}`content` then more text - - +++ my break - - ```{directive} args - :option: 1 - content - ``` - - ```python - def func(a): - print("{}".format(a)) - ``` - - % a comment - - [link to target](#title-target) - """ - ) - - out_string = parse_text( - in_string, - "html", - add_mathjax=True, - as_standalone=True, - add_css=dedent( - """\ - div.myst-front-matter { - border: 1px solid gray; - } - div.myst-directive { - background: lightgreen; - } - hr.myst-block-break { - border-top:1px dotted black; - } - span.myst-role { - background: lightgreen; - } - """ - ), - ) - file_regression.check(out_string, extension=".html") diff --git a/tests/test_renderers/test_html/test_minimal_html_page.html b/tests/test_renderers/test_html/test_minimal_html_page.html deleted file mode 100644 index cbdbdd14..00000000 --- a/tests/test_renderers/test_html/test_minimal_html_page.html +++ /dev/null @@ -1,44 +0,0 @@ -<!DOCTYPE html> -<html lang="en"> -<head> -<meta charset="utf-8"> -<title>Standalone HTML - - - -
a: 1
-
-

(title-target)=

-

title

-

Abc $$a=1$$ {role}content then more text

- -
-
-
{directive} args
-:option: 1
-content
-
-
-
def func(a):
-    print("{}".format(a))
-
- -

link to target

- - - - diff --git a/tests/test_renderers/test_roles_directives.py b/tests/test_renderers/test_roles_directives.py deleted file mode 100644 index 03153cc9..00000000 --- a/tests/test_renderers/test_roles_directives.py +++ /dev/null @@ -1,283 +0,0 @@ -"""In this module, parsing of role and directive blocks are tested. - -Tests are run against all roles/directives implemented internally in -docutils (v0.15) and sphinx (v2.1). -""" -import json -import os -import sys -from textwrap import dedent, indent - -import pytest - -from mistletoe.block_tokenizer import tokenize_main - -from myst_parser.block_tokens import Document - - -@pytest.mark.parametrize( - "name,arguments,body", - [ - ("no_arg_no_content", "", False), - ("no_arg_with_content", "", True), - ("one_arg_no_content", "a", False), - ("one_arg_with_content", "a", True), - ("two_arg_no_content", "a b", False), - ("two_arg_with_content", "a b", True), - ], -) -def test_directive_arguments(renderer, name, arguments, body): - content = ( - [ - "```{restructuredtext-test-directive}" - + (" " if arguments else "") - + arguments - ] - + (["content"] if body else []) - + ["```"] - ) - renderer.render(Document.read(content)) - expected = [ - '', - ' ', - " ", - ( - ' Directive processed. Type="restructuredtext-test-directive", ' - "arguments={args}, options={{}}, content:{content}".format( - args=[arguments] if arguments else [], content="" if body else " None" - ) - ), - ] - if body: - expected.extend( - [' ', " content"] - ) - expected.append("") - assert renderer.document.pformat() == "\n".join(expected) - - -@pytest.mark.parametrize( - "type,text", [("no_init_space", ("content",)), ("with_init_space", ("", "content"))] -) -def test_directive_no_options(renderer, type, text): - renderer.render( - Document.read(["```{restructuredtext-test-directive}"] + list(text) + ["```"]) - ) - assert renderer.document.pformat() == dedent( - """\ - - - - Directive processed. Type="restructuredtext-test-directive", arguments=[], options={}, content: - - content - """ # noqa: E501 - ) - - -@pytest.mark.skipif( - sys.version_info.major == 3 and sys.version_info.minor <= 5, - reason="option dict keys in wrong order", -) -@pytest.mark.parametrize( - "type,text", - [ - ("block_style", ("---", "option1: a", "option2: b", "---", "", "content")), - ("colon_style", (":option1: a", ":option2: b", "", "content")), - ("block_style_no_space", ("---", "option1: a", "option2: b", "---", "content")), - ("colon_style_no_space", (":option1: a", ":option2: b", "content")), - ( - "block_style_indented", - ("---", " option1: a", " option2: b", "---", "content"), - ), - ("colon_style_indeneted", (" :option1: a", " :option2: b", "content")), - ], -) -def test_directive_options(renderer, type, text): - renderer.render( - Document.read(["```{restructuredtext-test-directive}"] + list(text) + ["```"]) - ) - assert renderer.document.pformat() == dedent( - """\ - - - - Directive processed. Type="restructuredtext-test-directive", arguments=[], options={'option1': 'a', 'option2': 'b'}, content: - - content - """ # noqa: E501 - ) - - -@pytest.mark.parametrize( - "type,text", - [ - ("block_style", ("---", "option1", "option2: b", "---", "", "content")), - ("colon_style", (":option1", ":option2: b", "", "content")), - ], -) -def test_directive_options_error(renderer, type, text, file_regression): - renderer.render( - Document.read(["```{restructuredtext-test-directive}"] + list(text) + ["```"]) - ) - file_regression.check(renderer.document.pformat(), extension=".xml") - - -with open(os.path.join(os.path.dirname(__file__), "sphinx_roles.json"), "r") as fin: - roles_tests = json.load(fin) - - -@pytest.mark.parametrize( - "name,role_data", - [ - (r["name"], r) - for r in roles_tests - if r["import"].startswith("docutils") - and not r["import"].endswith("unimplemented_role") - and not r["import"].endswith("CustomRole") - ], -) -def test_docutils_roles(renderer, name, role_data): - """""" - if name in ["raw"]: - # TODO fix skips - pytest.skip("awaiting fix") - text = "{{{0}}}`{1}`".format(name, role_data.get("content", " ")) - print(text) - renderer.render(tokenize_main([text])[0]) - print( - repr(renderer.document.pformat()).replace(" " * 8, " ").replace('"', '\\"') - ) - assert renderer.document.pformat() == ( - role_data.get("doc_tag", '') - + "\n" - + indent(role_data["output"], " ") - + ("\n" if role_data["output"] else "") - ) - - -@pytest.mark.parametrize( - "name,role_data", - [ - (r["name"], r) - for r in roles_tests - if r["import"].startswith("sphinx") - # and not r["import"].endswith("unimplemented_role") - # and not r["import"].endswith("CustomRole") - ], -) -def test_sphinx_roles(sphinx_renderer, name, role_data): - """""" - # note, I think most of these have are actually directives rather than roles, - # that I've erroneously picked up in my gather function. - if name in ["abbr"]: # adding class="" ?? - # TODO fix skips - pytest.skip("awaiting fix") - sphinx_renderer.render( - tokenize_main(["{{{}}}`{}`".format(name, role_data.get("content", "a"))])[0] - ) - print( - repr(sphinx_renderer.document.pformat()) - .replace(" " * 8, " ") - .replace('"', '\\"') - ) - assert sphinx_renderer.document.pformat() == ( - role_data.get("doc_tag", '') - + "\n" - + indent(role_data["output"], " ") - + ("\n" if role_data["output"] else "") - ) - - -with open( - os.path.join(os.path.dirname(__file__), "sphinx_directives.json"), "r" -) as fin: - directive_tests = json.load(fin) - - -@pytest.mark.parametrize( - "name,directive", - [ - (d["name"], d) - for d in directive_tests - if d["class"].startswith("docutils") and not d.get("sub_only", False) - # todo add substitution definition directive and reference role - ], -) -def test_docutils_directives(renderer, name, directive): - """See https://docutils.sourceforge.io/docs/ref/rst/directives.html""" - # TODO dd domain directives - if name in [ - "role", - "rst-class", - "cssclass", - "line-block", - "block_quote", # this is only used as a base class - ]: - # TODO fix skips - pytest.skip("awaiting fix") - arguments = " ".join(directive["args"]) - renderer.render( - tokenize_main( - [ - "```{{{}}} {}\n".format(name, arguments), - directive.get("content", "") + "\n", - "```\n", - ] - )[0] - ) - print( - repr(renderer.document.pformat()).replace(" " * 8, " ").replace('"', '\\"') - ) - assert renderer.document.pformat() == ( - directive.get("doc_tag", '') - + "\n" - + indent(directive["output"], " ") - + ("\n" if directive["output"] else "") - ) - - -@pytest.mark.parametrize( - "name,directive", - [ - (d["name"], d) - for d in directive_tests - if d["class"].startswith("sphinx") and not d.get("sub_only", False) - ], -) -def test_sphinx_directives(sphinx_renderer, name, directive): - """See https://docutils.sourceforge.io/docs/ref/rst/directives.html""" - # TODO make sure all directives from domains are added (std and rst are done) - # (some were erroneously added to roles) - if name in ["include", "literalinclude"]: - # this is tested in the sphinx build level tests - return - if name in [ - "meta", - # TODO to properly parse, this requires that a directive with no content, - # and no options, can have its argument be the body - "productionlist", - ]: - # TODO fix skips - pytest.skip("awaiting fix") - arguments = " ".join(directive["args"]) - sphinx_renderer.render( - tokenize_main( - [ - "```{{{}}} {}\n".format(name, arguments), - directive.get("content", "") + "\n", - "```\n", - ] - )[0] - ) - print( - repr(sphinx_renderer.document.pformat()) - .replace(" " * 8, " ") - .replace('"', '\\"') - ) - assert sphinx_renderer.document.pformat() == ( - directive.get("doc_tag", '') - + "\n" - + indent(directive["output"], " ") - + ("\n" if directive["output"] else "") - ) diff --git a/tests/test_renderers/test_roles_directives/test_directive_options_error_block_style_text0_.xml b/tests/test_renderers/test_roles_directives/test_directive_options_error_block_style_text0_.xml deleted file mode 100644 index 9aee364a..00000000 --- a/tests/test_renderers/test_roles_directives/test_directive_options_error_block_style_text0_.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - - Directive 'restructuredtext-test-directive': - Invalid options YAML: mapping values are not allowed here - in "", line 2, column 8: - option2: b - ^ - - --- - option1 - option2: b - --- - - content diff --git a/tests/test_renderers/test_roles_directives/test_directive_options_error_colon_style_text1_.xml b/tests/test_renderers/test_roles_directives/test_directive_options_error_colon_style_text1_.xml deleted file mode 100644 index 7dec415e..00000000 --- a/tests/test_renderers/test_roles_directives/test_directive_options_error_colon_style_text1_.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - Directive 'restructuredtext-test-directive': - Invalid options YAML: mapping values are not allowed here - in "", line 2, column 8: - option2: b - ^ - - :option1 - :option2: b - - content diff --git a/tests/test_syntax/test_ast.py b/tests/test_syntax/test_ast.py deleted file mode 100644 index fdaac273..00000000 --- a/tests/test_syntax/test_ast.py +++ /dev/null @@ -1,140 +0,0 @@ -from textwrap import dedent - -import pytest - -from myst_parser import text_to_tokens -from myst_parser.json_renderer import JsonRenderer -from myst_parser.block_tokens import Document - - -@pytest.fixture -def json_renderer(): - renderer = JsonRenderer() - with renderer: - yield renderer - - -def test_render_tokens(): - root = text_to_tokens("abc") - assert isinstance(root, Document) - assert root.children, root.children - - -def test_walk(json_renderer, data_regression): - doc = Document.read( - dedent( - """\ - a **b** - - c [*d*](link) - """ - ) - ) - tree = [(repr(t.node), repr(t.parent), t.depth) for t in doc.walk()] - data_regression.check(tree) - - -@pytest.mark.parametrize( - "name,strings", - [ - ("basic", ["{name}`some content`"]), - ("indent_2", [" {name}`some content`"]), - ("indent_4", [" {name}`some econtent`"]), - ("escaped", ["\\{name}`some content`"]), - ("inline", ["a {name}`some content`"]), - ("multiple", ["{name}`some content` {name2}`other`"]), - ("internal_emphasis", ["{name}`*content*`"]), - ("external_emphasis", ["*{name}`content`*"]), - ("internal_math", ["{name}`some $content$`"]), - ("external_math", ["${name}`some content`$"]), - ("internal_code", ["{name}` ``some content`` `"]), - ("external_code", ["`` {name}`some content` ``"]), - ], -) -def test_role(name, json_renderer, data_regression, strings): - document = Document.read(strings) - data_regression.check(json_renderer.render(document, as_string=False)) - - -@pytest.mark.parametrize( - "name,strings", - [ - ("basic", ["(target)="]), - ("indent_2", [" (target)="]), - ("indent_4", [" (target)="]), - ("escaped", ["\\(target)="]), - ("inline", ["a (target)="]), - ("internal_emphasis", ["(*target*)="]), - ("external_emphasis", ["*(target)=*"]), - ], -) -def test_target(name, json_renderer, data_regression, strings): - document = Document.read(strings) - data_regression.check(json_renderer.render(document, as_string=False)) - - -@pytest.mark.parametrize( - "name,strings", - [ - ("basic", [r"% comment"]), - ("indent_2", [r" % comment"]), - ("indent_4", [r" % comment"]), - ("escaped", [r"\% comment"]), - ("inline", [r"a % comment"]), - ("follows_list", ["- item", r"% comment"]), - ], -) -def test_comment(name, json_renderer, data_regression, strings): - document = Document.read(strings) - data_regression.check(json_renderer.render(document, as_string=False)) - - -@pytest.mark.parametrize( - "name,strings", - [ - ("basic", ["+++"]), - ("indent_2", [" +++"]), - ("indent_4", [" +++"]), - ("escaped", [r"\+++"]), - ("inline", ["a +++"]), - ("following_content", ["+++ a"]), - ("following_space", ["+++ "]), - ("follows_list", ["- item", "+++"]), - ("following_content_no_space", ["+++a"]), - ], -) -def test_block_break(name, json_renderer, data_regression, strings): - document = Document.read(strings) - data_regression.check(json_renderer.render(document, as_string=False)) - - -@pytest.mark.parametrize("name,strings", [("basic", ["---", "a: b", "---"])]) -def test_front_matter(name, json_renderer, data_regression, strings): - document = Document.read(strings) - data_regression.check(json_renderer.render(document, as_string=False)) - - -@pytest.mark.parametrize( - "name,strings", - [ - ("ref_first", ["[ref]", "", '[ref]: https://google.com "title"']), - ("ref_last", ['[ref]: https://google.com "title"', "", "[ref]"]), - ("ref_syntax", ["[*syntax*]", "", '[*syntax*]: https://google.com "title"']), - ("ref_escape", ["[ref]", "", '\\[ref]: https://google.com "title"']), - ], -) -def test_link_references(name, strings, json_renderer, data_regression): - document = Document.read(strings) - data_regression.check(json_renderer.render(document, as_string=False)) - - -def test_table(json_renderer, data_regression): - string = dedent( - """\ - | abc | d | e | - | --- | --- | :---: | - | hjk | *y* | z | - """ - ) - document = Document.read(string) - data_regression.check(json_renderer.render(document, as_string=False)) diff --git a/tests/test_syntax/test_ast/test_block_break_basic_strings0_.yml b/tests/test_syntax/test_ast/test_block_break_basic_strings0_.yml deleted file mode 100644 index 9ce89d33..00000000 --- a/tests/test_syntax/test_ast/test_block_break_basic_strings0_.yml +++ /dev/null @@ -1,14 +0,0 @@ -children: -- content: '' - position: - data: {} - line_end: null - line_start: 1 - uri: null - raw: +++ - type: BlockBreak -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_block_break_escaped_strings3_.yml b/tests/test_syntax/test_ast/test_block_break_escaped_strings3_.yml deleted file mode 100644 index e493e520..00000000 --- a/tests/test_syntax/test_ast/test_block_break_escaped_strings3_.yml +++ /dev/null @@ -1,34 +0,0 @@ -children: -- children: - - children: - - content: + - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: EscapeSequence - - content: ++ - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_block_break_following_content_no_space_strings8_.yml b/tests/test_syntax/test_ast/test_block_break_following_content_no_space_strings8_.yml deleted file mode 100644 index 3ee9c861..00000000 --- a/tests/test_syntax/test_ast/test_block_break_following_content_no_space_strings8_.yml +++ /dev/null @@ -1,14 +0,0 @@ -children: -- content: a - position: - data: {} - line_end: null - line_start: 1 - uri: null - raw: +++a - type: BlockBreak -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_block_break_following_content_strings5_.yml b/tests/test_syntax/test_ast/test_block_break_following_content_strings5_.yml deleted file mode 100644 index 2ead80a2..00000000 --- a/tests/test_syntax/test_ast/test_block_break_following_content_strings5_.yml +++ /dev/null @@ -1,14 +0,0 @@ -children: -- content: a - position: - data: {} - line_end: null - line_start: 1 - uri: null - raw: +++ a - type: BlockBreak -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_block_break_following_space_strings6_.yml b/tests/test_syntax/test_ast/test_block_break_following_space_strings6_.yml deleted file mode 100644 index d6149398..00000000 --- a/tests/test_syntax/test_ast/test_block_break_following_space_strings6_.yml +++ /dev/null @@ -1,14 +0,0 @@ -children: -- content: '' - position: - data: {} - line_end: null - line_start: 1 - uri: null - raw: '+++ ' - type: BlockBreak -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_block_break_follows_list_strings7_.yml b/tests/test_syntax/test_ast/test_block_break_follows_list_strings7_.yml deleted file mode 100644 index 7d91587d..00000000 --- a/tests/test_syntax/test_ast/test_block_break_follows_list_strings7_.yml +++ /dev/null @@ -1,48 +0,0 @@ -children: -- children: - - children: - - children: - - content: item - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph - leader: '-' - loose: false - next_marker: null - position: - data: {} - line_end: 1 - line_start: 0 - uri: null - prepend: 2 - type: ListItem - loose: false - position: - data: {} - line_end: 1 - line_start: 0 - uri: null - start_at: null - type: List -- content: '' - position: - data: {} - line_end: null - line_start: 2 - uri: null - raw: +++ - type: BlockBreak -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_block_break_indent_2_strings1_.yml b/tests/test_syntax/test_ast/test_block_break_indent_2_strings1_.yml deleted file mode 100644 index 7aee4511..00000000 --- a/tests/test_syntax/test_ast/test_block_break_indent_2_strings1_.yml +++ /dev/null @@ -1,14 +0,0 @@ -children: -- content: '' - position: - data: {} - line_end: null - line_start: 1 - uri: null - raw: ' +++' - type: BlockBreak -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_block_break_indent_4_strings2_.yml b/tests/test_syntax/test_ast/test_block_break_indent_4_strings2_.yml deleted file mode 100644 index 0aa3fdf1..00000000 --- a/tests/test_syntax/test_ast/test_block_break_indent_4_strings2_.yml +++ /dev/null @@ -1,23 +0,0 @@ -children: -- children: - - content: '+++ - - ' - position: - data: {} - line_end: 1 - line_start: 0 - uri: null - type: RawText - language: '' - position: - data: {} - line_end: 1 - line_start: 0 - uri: null - type: BlockCode -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_block_break_inline_strings4_.yml b/tests/test_syntax/test_ast/test_block_break_inline_strings4_.yml deleted file mode 100644 index 29fa57e0..00000000 --- a/tests/test_syntax/test_ast/test_block_break_inline_strings4_.yml +++ /dev/null @@ -1,20 +0,0 @@ -children: -- children: - - content: a +++ - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_comment_basic_strings0_.yml b/tests/test_syntax/test_ast/test_comment_basic_strings0_.yml deleted file mode 100644 index fe6014e9..00000000 --- a/tests/test_syntax/test_ast/test_comment_basic_strings0_.yml +++ /dev/null @@ -1,14 +0,0 @@ -children: -- content: comment - position: - data: {} - line_end: null - line_start: 1 - uri: null - raw: '% comment' - type: LineComment -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_comment_escaped_strings3_.yml b/tests/test_syntax/test_ast/test_comment_escaped_strings3_.yml deleted file mode 100644 index 538f708e..00000000 --- a/tests/test_syntax/test_ast/test_comment_escaped_strings3_.yml +++ /dev/null @@ -1,34 +0,0 @@ -children: -- children: - - children: - - content: '%' - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: EscapeSequence - - content: ' comment' - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_comment_follows_list_strings5_.yml b/tests/test_syntax/test_ast/test_comment_follows_list_strings5_.yml deleted file mode 100644 index afb89f8a..00000000 --- a/tests/test_syntax/test_ast/test_comment_follows_list_strings5_.yml +++ /dev/null @@ -1,48 +0,0 @@ -children: -- children: - - children: - - children: - - content: item - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph - leader: '-' - loose: false - next_marker: null - position: - data: {} - line_end: 1 - line_start: 0 - uri: null - prepend: 2 - type: ListItem - loose: false - position: - data: {} - line_end: 1 - line_start: 0 - uri: null - start_at: null - type: List -- content: comment - position: - data: {} - line_end: null - line_start: 2 - uri: null - raw: '% comment' - type: LineComment -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_comment_indent_2_strings1_.yml b/tests/test_syntax/test_ast/test_comment_indent_2_strings1_.yml deleted file mode 100644 index cc2c229c..00000000 --- a/tests/test_syntax/test_ast/test_comment_indent_2_strings1_.yml +++ /dev/null @@ -1,14 +0,0 @@ -children: -- content: comment - position: - data: {} - line_end: null - line_start: 1 - uri: null - raw: ' % comment' - type: LineComment -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_comment_indent_4_strings2_.yml b/tests/test_syntax/test_ast/test_comment_indent_4_strings2_.yml deleted file mode 100644 index 3475a006..00000000 --- a/tests/test_syntax/test_ast/test_comment_indent_4_strings2_.yml +++ /dev/null @@ -1,23 +0,0 @@ -children: -- children: - - content: '% comment - - ' - position: - data: {} - line_end: 1 - line_start: 0 - uri: null - type: RawText - language: '' - position: - data: {} - line_end: 1 - line_start: 0 - uri: null - type: BlockCode -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_comment_inline_strings4_.yml b/tests/test_syntax/test_ast/test_comment_inline_strings4_.yml deleted file mode 100644 index 231113cc..00000000 --- a/tests/test_syntax/test_ast/test_comment_inline_strings4_.yml +++ /dev/null @@ -1,20 +0,0 @@ -children: -- children: - - content: a % comment - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_front_matter_basic_strings0_.yml b/tests/test_syntax/test_ast/test_front_matter_basic_strings0_.yml deleted file mode 100644 index 0482f95e..00000000 --- a/tests/test_syntax/test_ast/test_front_matter_basic_strings0_.yml +++ /dev/null @@ -1,15 +0,0 @@ -children: [] -footnotes: {} -footref_order: [] -front_matter: - content: 'a: b - - ' - position: - data: {} - line_end: 3 - line_start: 1 - uri: null - type: FrontMatter -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_link_references_ref_escape_strings3_.yml b/tests/test_syntax/test_ast/test_link_references_ref_escape_strings3_.yml deleted file mode 100644 index e9d4e56d..00000000 --- a/tests/test_syntax/test_ast/test_link_references_ref_escape_strings3_.yml +++ /dev/null @@ -1,48 +0,0 @@ -children: -- children: - - content: '[ref]' - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -- children: - - children: - - content: '[' - position: - data: {} - line_end: 3 - line_start: 3 - uri: null - type: RawText - position: - data: {} - line_end: 3 - line_start: 3 - uri: null - type: EscapeSequence - - content: 'ref]: https://google.com "title"' - position: - data: {} - line_end: 3 - line_start: 3 - uri: null - type: RawText - position: - data: {} - line_end: 3 - line_start: 3 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_link_references_ref_first_strings0_.yml b/tests/test_syntax/test_ast/test_link_references_ref_first_strings0_.yml deleted file mode 100644 index 9aaa1f95..00000000 --- a/tests/test_syntax/test_ast/test_link_references_ref_first_strings0_.yml +++ /dev/null @@ -1,32 +0,0 @@ -children: -- children: - - children: - - content: ref - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - target: https://google.com - title: title - type: Link - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: - ref: - - https://google.com - - title -type: Document diff --git a/tests/test_syntax/test_ast/test_link_references_ref_last_strings1_.yml b/tests/test_syntax/test_ast/test_link_references_ref_last_strings1_.yml deleted file mode 100644 index 81da5144..00000000 --- a/tests/test_syntax/test_ast/test_link_references_ref_last_strings1_.yml +++ /dev/null @@ -1,32 +0,0 @@ -children: -- children: - - children: - - content: ref - position: - data: {} - line_end: 3 - line_start: 3 - uri: null - type: RawText - position: - data: {} - line_end: 3 - line_start: 3 - uri: null - target: https://google.com - title: title - type: Link - position: - data: {} - line_end: 3 - line_start: 3 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: - ref: - - https://google.com - - title -type: Document diff --git a/tests/test_syntax/test_ast/test_link_references_ref_syntax_strings2_.yml b/tests/test_syntax/test_ast/test_link_references_ref_syntax_strings2_.yml deleted file mode 100644 index 4e743581..00000000 --- a/tests/test_syntax/test_ast/test_link_references_ref_syntax_strings2_.yml +++ /dev/null @@ -1,39 +0,0 @@ -children: -- children: - - children: - - children: - - content: syntax - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Emphasis - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - target: https://google.com - title: title - type: Link - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: - '*syntax*': - - https://google.com - - title -type: Document diff --git a/tests/test_syntax/test_ast/test_role_basic_strings0_.yml b/tests/test_syntax/test_ast/test_role_basic_strings0_.yml deleted file mode 100644 index fd2922fd..00000000 --- a/tests/test_syntax/test_ast/test_role_basic_strings0_.yml +++ /dev/null @@ -1,28 +0,0 @@ -children: -- children: - - children: - - content: some content - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - role_name: name - type: Role - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_role_escaped_strings3_.yml b/tests/test_syntax/test_ast/test_role_escaped_strings3_.yml deleted file mode 100644 index b01dad0a..00000000 --- a/tests/test_syntax/test_ast/test_role_escaped_strings3_.yml +++ /dev/null @@ -1,48 +0,0 @@ -children: -- children: - - children: - - content: '{' - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: EscapeSequence - - content: name} - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - - children: - - content: some content - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: InlineCode - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_role_external_code_strings11_.yml b/tests/test_syntax/test_ast/test_role_external_code_strings11_.yml deleted file mode 100644 index cb97c850..00000000 --- a/tests/test_syntax/test_ast/test_role_external_code_strings11_.yml +++ /dev/null @@ -1,27 +0,0 @@ -children: -- children: - - children: - - content: '{name}`some content`' - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: InlineCode - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_role_external_emphasis_strings7_.yml b/tests/test_syntax/test_ast/test_role_external_emphasis_strings7_.yml deleted file mode 100644 index 9a8fa557..00000000 --- a/tests/test_syntax/test_ast/test_role_external_emphasis_strings7_.yml +++ /dev/null @@ -1,35 +0,0 @@ -children: -- children: - - children: - - children: - - content: content - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - role_name: name - type: Role - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Emphasis - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_role_external_math_strings9_.yml b/tests/test_syntax/test_ast/test_role_external_math_strings9_.yml deleted file mode 100644 index 17f269be..00000000 --- a/tests/test_syntax/test_ast/test_role_external_math_strings9_.yml +++ /dev/null @@ -1,20 +0,0 @@ -children: -- children: - - content: ${name}`some content`$ - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Math - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_role_indent_2_strings1_.yml b/tests/test_syntax/test_ast/test_role_indent_2_strings1_.yml deleted file mode 100644 index fd2922fd..00000000 --- a/tests/test_syntax/test_ast/test_role_indent_2_strings1_.yml +++ /dev/null @@ -1,28 +0,0 @@ -children: -- children: - - children: - - content: some content - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - role_name: name - type: Role - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_role_indent_4_strings2_.yml b/tests/test_syntax/test_ast/test_role_indent_4_strings2_.yml deleted file mode 100644 index d50312df..00000000 --- a/tests/test_syntax/test_ast/test_role_indent_4_strings2_.yml +++ /dev/null @@ -1,23 +0,0 @@ -children: -- children: - - content: '{name}`some econtent` - - ' - position: - data: {} - line_end: 1 - line_start: 0 - uri: null - type: RawText - language: '' - position: - data: {} - line_end: 1 - line_start: 0 - uri: null - type: BlockCode -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_role_inline_strings4_.yml b/tests/test_syntax/test_ast/test_role_inline_strings4_.yml deleted file mode 100644 index 41dc8aaa..00000000 --- a/tests/test_syntax/test_ast/test_role_inline_strings4_.yml +++ /dev/null @@ -1,35 +0,0 @@ -children: -- children: - - content: 'a ' - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - - children: - - content: some content - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - role_name: name - type: Role - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_role_internal_code_strings10_.yml b/tests/test_syntax/test_ast/test_role_internal_code_strings10_.yml deleted file mode 100644 index b489e65e..00000000 --- a/tests/test_syntax/test_ast/test_role_internal_code_strings10_.yml +++ /dev/null @@ -1,28 +0,0 @@ -children: -- children: - - children: - - content: '``some content``' - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - role_name: name - type: Role - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_role_internal_emphasis_strings6_.yml b/tests/test_syntax/test_ast/test_role_internal_emphasis_strings6_.yml deleted file mode 100644 index 9c209b9c..00000000 --- a/tests/test_syntax/test_ast/test_role_internal_emphasis_strings6_.yml +++ /dev/null @@ -1,28 +0,0 @@ -children: -- children: - - children: - - content: '*content*' - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - role_name: name - type: Role - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_role_internal_math_strings8_.yml b/tests/test_syntax/test_ast/test_role_internal_math_strings8_.yml deleted file mode 100644 index 06337578..00000000 --- a/tests/test_syntax/test_ast/test_role_internal_math_strings8_.yml +++ /dev/null @@ -1,28 +0,0 @@ -children: -- children: - - children: - - content: some $content$ - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - role_name: name - type: Role - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_role_multiple_strings5_.yml b/tests/test_syntax/test_ast/test_role_multiple_strings5_.yml deleted file mode 100644 index f612ce0a..00000000 --- a/tests/test_syntax/test_ast/test_role_multiple_strings5_.yml +++ /dev/null @@ -1,50 +0,0 @@ -children: -- children: - - children: - - content: some content - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - role_name: name - type: Role - - content: ' ' - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - - children: - - content: other - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - role_name: name2 - type: Role - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_table.yml b/tests/test_syntax/test_ast/test_table.yml deleted file mode 100644 index 4a13e891..00000000 --- a/tests/test_syntax/test_ast/test_table.yml +++ /dev/null @@ -1,137 +0,0 @@ -children: -- children: - - children: - - align: null - children: - - content: hjk - position: - data: {} - line_end: null - line_start: 3 - uri: null - type: RawText - position: - data: {} - line_end: null - line_start: 3 - uri: null - type: TableCell - - align: null - children: - - children: - - content: y - position: - data: {} - line_end: null - line_start: 3 - uri: null - type: RawText - position: - data: {} - line_end: null - line_start: 3 - uri: null - type: Emphasis - position: - data: {} - line_end: null - line_start: 3 - uri: null - type: TableCell - - align: 0 - children: - - content: z - position: - data: {} - line_end: null - line_start: 3 - uri: null - type: RawText - position: - data: {} - line_end: null - line_start: 3 - uri: null - type: TableCell - position: - data: {} - line_end: null - line_start: 3 - uri: null - row_align: - - null - - null - - 0 - type: TableRow - column_align: - - null - - null - - 0 - header: - children: - - align: null - children: - - content: abc - position: - data: {} - line_end: null - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: null - line_start: 1 - uri: null - type: TableCell - - align: null - children: - - content: d - position: - data: {} - line_end: null - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: null - line_start: 1 - uri: null - type: TableCell - - align: 0 - children: - - content: e - position: - data: {} - line_end: null - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: null - line_start: 1 - uri: null - type: TableCell - position: - data: {} - line_end: null - line_start: 1 - uri: null - row_align: - - null - - null - - 0 - type: TableRow - position: - data: {} - line_end: 3 - line_start: 1 - uri: null - type: Table -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_target_basic_strings0_.yml b/tests/test_syntax/test_ast/test_target_basic_strings0_.yml deleted file mode 100644 index 153fd150..00000000 --- a/tests/test_syntax/test_ast/test_target_basic_strings0_.yml +++ /dev/null @@ -1,28 +0,0 @@ -children: -- children: - - children: - - content: target - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - target: target - type: Target - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_target_escaped_strings3_.yml b/tests/test_syntax/test_ast/test_target_escaped_strings3_.yml deleted file mode 100644 index 8feecfd9..00000000 --- a/tests/test_syntax/test_ast/test_target_escaped_strings3_.yml +++ /dev/null @@ -1,34 +0,0 @@ -children: -- children: - - children: - - content: ( - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: EscapeSequence - - content: target)= - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_target_external_emphasis_strings6_.yml b/tests/test_syntax/test_ast/test_target_external_emphasis_strings6_.yml deleted file mode 100644 index 2118b871..00000000 --- a/tests/test_syntax/test_ast/test_target_external_emphasis_strings6_.yml +++ /dev/null @@ -1,35 +0,0 @@ -children: -- children: - - children: - - children: - - content: target - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - target: target - type: Target - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Emphasis - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_target_indent_2_strings1_.yml b/tests/test_syntax/test_ast/test_target_indent_2_strings1_.yml deleted file mode 100644 index 153fd150..00000000 --- a/tests/test_syntax/test_ast/test_target_indent_2_strings1_.yml +++ /dev/null @@ -1,28 +0,0 @@ -children: -- children: - - children: - - content: target - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - target: target - type: Target - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_target_indent_4_strings2_.yml b/tests/test_syntax/test_ast/test_target_indent_4_strings2_.yml deleted file mode 100644 index 27e96e42..00000000 --- a/tests/test_syntax/test_ast/test_target_indent_4_strings2_.yml +++ /dev/null @@ -1,23 +0,0 @@ -children: -- children: - - content: '(target)= - - ' - position: - data: {} - line_end: 1 - line_start: 0 - uri: null - type: RawText - language: '' - position: - data: {} - line_end: 1 - line_start: 0 - uri: null - type: BlockCode -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_target_inline_strings4_.yml b/tests/test_syntax/test_ast/test_target_inline_strings4_.yml deleted file mode 100644 index e53306b1..00000000 --- a/tests/test_syntax/test_ast/test_target_inline_strings4_.yml +++ /dev/null @@ -1,35 +0,0 @@ -children: -- children: - - content: 'a ' - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - - children: - - content: target - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - target: target - type: Target - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_target_internal_emphasis_strings5_.yml b/tests/test_syntax/test_ast/test_target_internal_emphasis_strings5_.yml deleted file mode 100644 index 3121a7c9..00000000 --- a/tests/test_syntax/test_ast/test_target_internal_emphasis_strings5_.yml +++ /dev/null @@ -1,28 +0,0 @@ -children: -- children: - - children: - - content: '*target*' - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: RawText - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - target: '*target*' - type: Target - position: - data: {} - line_end: 1 - line_start: 1 - uri: null - type: Paragraph -footnotes: {} -footref_order: [] -front_matter: null -link_definitions: {} -type: Document diff --git a/tests/test_syntax/test_ast/test_walk.yml b/tests/test_syntax/test_ast/test_walk.yml deleted file mode 100644 index 8c3fd0e7..00000000 --- a/tests/test_syntax/test_ast/test_walk.yml +++ /dev/null @@ -1,27 +0,0 @@ -- - Paragraph(children=2, position=Position(lines=[1:1])) - - Document(children=2, link_definitions=0, footnotes=0, footref_order=0, front_matter=None) - - 1 -- - Paragraph(children=2, position=Position(lines=[3:3])) - - Document(children=2, link_definitions=0, footnotes=0, footref_order=0, front_matter=None) - - 1 -- - RawText() - - Paragraph(children=2, position=Position(lines=[1:1])) - - 2 -- - Strong(children=1) - - Paragraph(children=2, position=Position(lines=[1:1])) - - 2 -- - RawText() - - Paragraph(children=2, position=Position(lines=[3:3])) - - 2 -- - Link(target='link', title='') - - Paragraph(children=2, position=Position(lines=[3:3])) - - 2 -- - RawText() - - Strong(children=1) - - 3 -- - Emphasis(children=1) - - Link(target='link', title='') - - 3 -- - RawText() - - Emphasis(children=1) - - 4 From b078d0f6f22429f565e3062537ad6fc9322f8ce1 Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Fri, 27 Mar 2020 03:46:58 +0000 Subject: [PATCH 02/32] updates --- myst_parser/__init__.py | 1 + myst_parser/docutils_renderer.py | 30 +++--- myst_parser/main.py | 42 +++++++-- myst_parser/sphinx_parser.py | 30 +++++- myst_parser/sphinx_renderer.py | 30 ++++++ setup.py | 2 +- .../fixtures/{basic.md => syntax_elements.md} | 26 +++++- tests/test_renderers/test_fixtures.py | 7 +- tests/test_sphinx/sourcedirs/basic/content.md | 4 + .../sourcedirs/conf_values/conf.py | 3 + .../sourcedirs/conf_values/index.md | 5 + tests/test_sphinx/test_sphinx_builds.py | 26 +++++- .../test_sphinx_builds/test_basic.xml | 91 +++++++++++++++++++ .../test_sphinx_builds/test_conf_values.xml | 8 ++ 14 files changed, 274 insertions(+), 31 deletions(-) rename tests/test_renderers/fixtures/{basic.md => syntax_elements.md} (94%) create mode 100644 tests/test_sphinx/sourcedirs/conf_values/conf.py create mode 100644 tests/test_sphinx/sourcedirs/conf_values/index.md create mode 100644 tests/test_sphinx/test_sphinx_builds/test_basic.xml create mode 100644 tests/test_sphinx/test_sphinx_builds/test_conf_values.xml diff --git a/myst_parser/__init__.py b/myst_parser/__init__.py index b065df69..f0e09a84 100644 --- a/myst_parser/__init__.py +++ b/myst_parser/__init__.py @@ -7,5 +7,6 @@ def setup(app): app.add_source_suffix(".md", "markdown") app.add_source_parser(MystParser) + app.add_config_value("myst_config", {}, "env") return {"version": __version__, "parallel_read_safe": True} diff --git a/myst_parser/docutils_renderer.py b/myst_parser/docutils_renderer.py index 62aabf3a..7eb8d934 100644 --- a/myst_parser/docutils_renderer.py +++ b/myst_parser/docutils_renderer.py @@ -1,4 +1,5 @@ """NOTE: this will eventually be moved out of core""" +from collections import OrderedDict from contextlib import contextmanager import inspect import json @@ -84,11 +85,12 @@ def render(self, tokens: List[Token], options, env: AttrDict): tokens = nest_tokens(tokens) # move footnote definitions to env - self.env.setdefault("foot_refs", []) + self.env.setdefault("foot_refs", {}) new_tokens = [] for token in tokens: if token.type == "footnote_reference_open": - self.env["foot_refs"].append(token) + label = token.meta["label"] + self.env["foot_refs"].setdefault(label, []).append(token) else: new_tokens.append(token) tokens = new_tokens @@ -107,17 +109,19 @@ def render(self, tokens: List[Token], options, env: AttrDict): if not self.config.get("output_footnotes", True): return self.document - # add footnotes - referenced = { - v["label"] for v in self.env.get("footnotes", {}).get("list", {}).values() - } - # only output referenced - foot_refs = [f for f in self.env["foot_refs"] if f.meta["label"] in referenced] + # we don't use the foot_references stored in the env + # since references within directives/roles will have been added after + # those from the initial markdown parse + # instead we gather them from a walk of the created document + foot_refs = OrderedDict() + for refnode in self.document.traverse(nodes.footnote_reference): + if refnode["refname"] not in foot_refs: + foot_refs[refnode["refname"]] = True if foot_refs: self.current_node.append(nodes.transition()) - for footref in foot_refs: # TODO sort by referenced - self.render_footnote_reference_open(footref) + for footref in foot_refs: + self.render_footnote_reference_open(self.env["foot_refs"][footref][0]) return self.document @@ -145,11 +149,12 @@ def nested_render_text(self, text: str, lineno: int, disable_front_matter=True): tokens = nest_tokens(tokens) # move footnote definitions to env - self.env.setdefault("foot_refs", []) + self.env.setdefault("foot_refs", {}) new_tokens = [] for token in tokens: if token.type == "footnote_reference_open": - self.env["foot_refs"].append(token) + label = token.meta["label"] + self.env["foot_refs"].setdefault(label, []).append(token) else: new_tokens.append(token) tokens = new_tokens @@ -223,7 +228,6 @@ def renderInlineAsText(self, tokens: List[Token]) -> str: # result += self.renderInlineAsText(token.children) else: result += self.renderInlineAsText(token.children) - return result # ### render methods for commonmark tokens diff --git a/myst_parser/main.py b/myst_parser/main.py index acffe99a..278c9799 100644 --- a/myst_parser/main.py +++ b/myst_parser/main.py @@ -1,3 +1,5 @@ +from typing import List + from markdown_it import MarkdownIt from markdown_it.renderer import RendererHTML from markdown_it.extensions.front_matter import front_matter_plugin @@ -6,11 +8,15 @@ from markdown_it.extensions.texmath import texmath_plugin from markdown_it.extensions.footnote import footnote_plugin +from docutils.nodes import document as docutils_doc from myst_parser.docutils_renderer import DocutilsRenderer from myst_parser.docutils_renderer import make_document -def default_parser(renderer="sphinx") -> MarkdownIt: +def default_parser( + renderer="sphinx", disable_syntax=(), math_delimiters="dollars" +) -> MarkdownIt: + """Return the default parser configuration for MyST""" from myst_parser.sphinx_renderer import SphinxRenderer renderers = { @@ -26,21 +32,41 @@ def default_parser(renderer="sphinx") -> MarkdownIt: .use(front_matter_plugin) .use(myst_block_plugin) .use(myst_role_plugin) - .use(texmath_plugin) + .use(texmath_plugin, delimiters=math_delimiters) .use(footnote_plugin) .disable("footnote_inline") # disable this for now, because it need a new implementation in the renderer .disable("footnote_tail") # we don't want to yet remove un-referenced, because they may be referenced # in admonition type directives - # we need to do our own post process to gather them - # (and also add nodes.transition() above) + # so we do our own post processing ) + for name in disable_syntax: + md.disable(name, True) return md -def to_docutils(text, options=None, env=None, document=None, in_sphinx_env=False): - md = default_parser() +def to_docutils( + text: str, + options=None, + env=None, + document: docutils_doc = None, + in_sphinx_env: bool = False, + disable_syntax: List[str] = (), + math_delimiters: str = "dollars", +) -> docutils_doc: + """Render text to the docutils AST + + :param text: the text to render + :param options: options to update the parser with + :param env: The sandbox environment for the parse + (will contain e.g. reference definitions) + :param document: the docutils root node to use (otherwise a new one will be created) + :param in_sphinx_env: initialise a minimal sphinx environment (useful for testing) + :param disable_syntax: list of syntax element names to disable + + """ + md = default_parser(disable_syntax=disable_syntax, math_delimiters=math_delimiters) if options: md.options.update(options) md.options["document"] = document or make_document() @@ -53,11 +79,11 @@ def to_docutils(text, options=None, env=None, document=None, in_sphinx_env=False return md.render(text, env) -def to_html(text, env=None): +def to_html(text: str, env=None): md = default_parser("html") return md.render(text, env) -def to_tokens(text, env=None): +def to_tokens(text: str, env=None): md = default_parser() return md.parse(text, env) diff --git a/myst_parser/sphinx_parser.py b/myst_parser/sphinx_parser.py index 4de17f88..e2a98c57 100644 --- a/myst_parser/sphinx_parser.py +++ b/myst_parser/sphinx_parser.py @@ -14,7 +14,11 @@ class MystParser(Parser): supported = ("md", "markdown", "myst") translate_section_name = None - default_config = {"known_url_schemes": None} + default_config = { + "known_url_schemes": None, + "disable_syntax": (), + "math_delimiters": "dollars", + } # these specs are copied verbatim from the docutils RST parser settings_spec = ( @@ -169,12 +173,30 @@ def parse(self, inputstring: str, document: nodes.document): :param inputstring: The source string to parse :param document: The root docutils node to add AST elements to """ - # TODO add conf.py configurable settings self.config = self.default_config.copy() try: - new_cfg = self.document.settings.env.config.myst_config + new_cfg = document.settings.env.config.myst_config self.config.update(new_cfg) except AttributeError: pass - to_docutils(inputstring, options=self.config, document=document) + # TODO raise errors or log error with sphinx? + try: + for s in self.config["disable_syntax"]: + assert isinstance(s, str) + except (AssertionError, TypeError): + raise TypeError("disable_syntax not of type List[str]") + + allowed_delimiters = ["brackets", "kramdown", "dollars", "julia"] + if not self.config["math_delimiters"] in allowed_delimiters: + raise ValueError( + f"math_delimiters config not an allowed name: {allowed_delimiters}" + ) + + to_docutils( + inputstring, + options=self.config, + document=document, + disable_syntax=self.config["disable_syntax"] or [], + math_delimiters=self.config["math_delimiters"], + ) diff --git a/myst_parser/sphinx_renderer.py b/myst_parser/sphinx_renderer.py index 19c9993b..302bd18e 100644 --- a/myst_parser/sphinx_renderer.py +++ b/myst_parser/sphinx_renderer.py @@ -1,6 +1,7 @@ from contextlib import contextmanager import copy from urllib.parse import unquote +from typing import cast from docutils import nodes from docutils.parsers.rst import directives, roles @@ -8,6 +9,7 @@ from sphinx import addnodes from sphinx.application import builtin_extensions, Sphinx from sphinx.config import Config +from sphinx.domains.math import MathDomain from sphinx.environment import BuildEnvironment from sphinx.events import EventManager from sphinx.project import Project @@ -43,6 +45,34 @@ def handle_cross_reference(self, token, destination): with self.current_node_context(text_node): self.render_children(token) + def render_math_block_eqno(self, token): + label = token.info + content = token.content + node = nodes.math_block( + content, content, nowrap=False, number=None, label=label + ) + target = self.add_math_target(node) + self.add_line_and_source_path(target, token) + self.current_node.append(target) + self.add_line_and_source_path(node, token) + self.current_node.append(node) + + def add_math_target(self, node): + # Code mainly copied from sphinx.directives.patches.MathDirective + env = self.document.settings.env + + # register label to domain + domain = cast(MathDomain, env.get_domain("math")) + domain.note_equation(env.docname, node["label"], location=node) + node["number"] = domain.get_equation_number_for(node["label"]) + node["docname"] = env.docname + + # create target node + node_id = nodes.make_id("equation-%s" % node["label"]) + target = nodes.target("", "", ids=[node_id]) + self.document.note_explicit_target(target) + return target + def minimal_sphinx_app(configuration=None, sourcedir=None): """Create a minimal Sphinx environment; loading sphinx roles, directives, etc. diff --git a/setup.py b/setup.py index 36d434af..798e0cec 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ ], keywords="markdown lexer parser development docutils sphinx", python_requires=">=3.6", - install_requires=["mistletoe-ebp~=0.10"], + install_requires=["markdown-it-py~=0.2.3"], extras_require={ "sphinx": ["pyyaml", "docutils>=0.15", "sphinx>=2,<3"], "code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"], diff --git a/tests/test_renderers/fixtures/basic.md b/tests/test_renderers/fixtures/syntax_elements.md similarity index 94% rename from tests/test_renderers/fixtures/basic.md rename to tests/test_renderers/fixtures/syntax_elements.md index a4fb469d..fa2c8c05 100644 --- a/tests/test_renderers/fixtures/basic.md +++ b/tests/test_renderers/fixtures/syntax_elements.md @@ -124,7 +124,7 @@ foo ``` . - + foo . @@ -264,6 +264,30 @@ $$foo$$ foo . +-------------------------- +Math Block With Equation Label: +. +$$foo$$ (abc) +. + + + + foo +. + +-------------------------- +Table: +. +a|b +-|- +1|2 +. + + + + foo +. + -------------------------- Sphinx Role containing backtick: . diff --git a/tests/test_renderers/test_fixtures.py b/tests/test_renderers/test_fixtures.py index 9b0ae818..fd24f041 100644 --- a/tests/test_renderers/test_fixtures.py +++ b/tests/test_renderers/test_fixtures.py @@ -9,10 +9,11 @@ @pytest.mark.parametrize( - "line,title,input,expected", read_fixture_file(FIXTURE_PATH.joinpath("basic.md")) + "line,title,input,expected", + read_fixture_file(FIXTURE_PATH.joinpath("syntax_elements.md")), ) -def test_basic(line, title, input, expected): - document = to_docutils(input) +def test_syntax_elements(line, title, input, expected): + document = to_docutils(input, in_sphinx_env=True) print(document.pformat()) assert "\n".join( [l.rstrip() for l in document.pformat().splitlines()] diff --git a/tests/test_sphinx/sourcedirs/basic/content.md b/tests/test_sphinx/sourcedirs/basic/content.md index 17a3ab87..7098ac81 100644 --- a/tests/test_sphinx/sourcedirs/basic/content.md +++ b/tests/test_sphinx/sourcedirs/basic/content.md @@ -34,6 +34,10 @@ Caption $$b=2$$ +$$c=2$$ (eq:label) + +{eq}`eq:label` + `` a=1{`} `` | a | b | diff --git a/tests/test_sphinx/sourcedirs/conf_values/conf.py b/tests/test_sphinx/sourcedirs/conf_values/conf.py new file mode 100644 index 00000000..1a66017a --- /dev/null +++ b/tests/test_sphinx/sourcedirs/conf_values/conf.py @@ -0,0 +1,3 @@ +extensions = ["myst_parser"] +exclude_patterns = ["_build"] +myst_config = {"disable_syntax": ["emphasis"], "math_delimiters": "brackets"} diff --git a/tests/test_sphinx/sourcedirs/conf_values/index.md b/tests/test_sphinx/sourcedirs/conf_values/index.md new file mode 100644 index 00000000..ee3150e2 --- /dev/null +++ b/tests/test_sphinx/sourcedirs/conf_values/index.md @@ -0,0 +1,5 @@ +# Test + +*disabled* + +\[a=1\] diff --git a/tests/test_sphinx/test_sphinx_builds.py b/tests/test_sphinx/test_sphinx_builds.py index aa7f0431..9d5eef67 100644 --- a/tests/test_sphinx/test_sphinx_builds.py +++ b/tests/test_sphinx/test_sphinx_builds.py @@ -13,7 +13,14 @@ @pytest.mark.sphinx( buildername="html", srcdir=os.path.join(SOURCE_DIR, "basic"), freshenv=True ) -def test_basic(app, status, warning, get_sphinx_app_output, remove_sphinx_builds): +def test_basic( + app, + status, + warning, + get_sphinx_app_doctree, + get_sphinx_app_output, + remove_sphinx_builds, +): """basic test.""" app.build() @@ -21,9 +28,26 @@ def test_basic(app, status, warning, get_sphinx_app_output, remove_sphinx_builds warnings = warning.getvalue().strip() assert warnings == "" + get_sphinx_app_doctree(app, filename="content.doctree", regress=True) get_sphinx_app_output(app, filename="content.html", regress_html=True) +@pytest.mark.sphinx( + buildername="html", srcdir=os.path.join(SOURCE_DIR, "conf_values"), freshenv=True +) +def test_conf_values( + app, status, warning, get_sphinx_app_doctree, remove_sphinx_builds +): + """basic test.""" + app.build() + + assert "build succeeded" in status.getvalue() # Build succeeded + warnings = warning.getvalue().strip() + assert warnings == "" + + get_sphinx_app_doctree(app, filename="index.doctree", regress=True) + + @pytest.mark.sphinx( buildername="html", srcdir=os.path.join(SOURCE_DIR, "includes"), freshenv=True ) diff --git a/tests/test_sphinx/test_sphinx_builds/test_basic.xml b/tests/test_sphinx/test_sphinx_builds/test_basic.xml new file mode 100644 index 00000000..fbd00f0b --- /dev/null +++ b/tests/test_sphinx/test_sphinx_builds/test_basic.xml @@ -0,0 +1,91 @@ + + +
+ + Header + <comment xml:space="preserve"> + comment + <note> + <paragraph> + abcd + <emphasis> + abc + + <reference refuri="https://www.google.com"> + google + <warning> + <paragraph> + xyz + <target refid="target2"> + <figure align="default" ids="id1 target2" names="target2"> + <image candidates="{'*': 'example.jpg'}" height="40px" uri="example.jpg"> + <caption> + Caption + <paragraph> + <image alt="alternative text" candidates="{'*': 'example.jpg'}" uri="example.jpg"> + <paragraph> + <reference refuri="https://www.google.com"> + https://www.google.com + <paragraph> + <strong> + <literal classes="code"> + a=1{`} + <paragraph> + <math> + sdfds + <paragraph> + <strong> + <math> + a=1 + <target refid="equation-eq-label"> + <math_block docname="content" ids="equation-eq-label" label="eq:label" nowrap="False" number="1" xml:space="preserve"> + c=2 + <paragraph> + <pending_xref refdoc="content" refdomain="math" refexplicit="False" reftarget="eq:label" reftype="eq" refwarn="True"> + <literal classes="xref eq"> + eq:label + <paragraph> + <literal> + a=1{`} + <paragraph> + this + + is + + a + + paragraph + <comment xml:space="preserve"> + a comment 2 + <paragraph> + this is a second paragraph + <bullet_list> + <list_item> + <paragraph> + a list + <bullet_list> + <list_item> + <paragraph> + a sub list + <comment xml:space="preserve"> + a comment 3 + <bullet_list> + <list_item> + <paragraph> + new list? + <paragraph> + <pending_xref refdoc="content" refdomain="std" refexplicit="False" reftarget="target" reftype="ref" refwarn="True"> + <inline classes="xref std std-ref"> + target + + <pending_xref refdoc="content" refdomain="std" refexplicit="False" reftarget="target2" reftype="ref" refwarn="True"> + <inline classes="xref std std-ref"> + target2 + <comment classes="block_break" xml:space="preserve"> + a block break + <paragraph> + <reference refuri="https://www.google.com" title="a title"> + name + <literal_block language="default" xml:space="preserve"> + def func(a, b=1): + print(a) diff --git a/tests/test_sphinx/test_sphinx_builds/test_conf_values.xml b/tests/test_sphinx/test_sphinx_builds/test_conf_values.xml new file mode 100644 index 00000000..699ce205 --- /dev/null +++ b/tests/test_sphinx/test_sphinx_builds/test_conf_values.xml @@ -0,0 +1,8 @@ +<document source="index.md"> + <section ids="test" names="test"> + <title> + Test + <paragraph> + *disabled* + <math_block nowrap="False" number="True" xml:space="preserve"> + a=1 From bc0f77fdb89d966007c92e15efc508a70e766b46 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 05:26:27 +0000 Subject: [PATCH 03/32] remove archive --- _archive/_docutils_renderer.py | 1243 -------------------------------- 1 file changed, 1243 deletions(-) delete mode 100644 _archive/_docutils_renderer.py diff --git a/_archive/_docutils_renderer.py b/_archive/_docutils_renderer.py deleted file mode 100644 index 1fe3f45f..00000000 --- a/_archive/_docutils_renderer.py +++ /dev/null @@ -1,1243 +0,0 @@ -from collections import OrderedDict -from contextlib import contextmanager -import copy -from os.path import splitext -from pathlib import Path -import re -import sys -from typing import List, Optional -from urllib.parse import urlparse, unquote - -from docutils import nodes -from docutils.frontend import OptionParser -from docutils.languages import get_language -from docutils.parsers.rst import directives, Directive, DirectiveError, roles -from docutils.parsers.rst import Parser as RSTParser -from docutils.parsers.rst.directives.misc import Include -from docutils.parsers.rst.states import RSTStateMachine, Body, Inliner -from docutils.statemachine import StringList -from docutils.utils import new_document, Reporter -import yaml - -from mistletoe import block_tokens, block_tokens_ext, span_tokens, span_tokens_ext -from mistletoe.base_elements import SourceLines -from mistletoe.renderers.base import BaseRenderer -from mistletoe.parse_context import get_parse_context, ParseContext - -from myst_parser import block_tokens as myst_block_tokens -from myst_parser import span_tokens as myst_span_tokens -from myst_parser.parse_directives import parse_directive_text, DirectiveParsingError -from myst_parser.utils import escape_url - - -class DocutilsRenderer(BaseRenderer): - """A mistletoe renderer to populate (in-place) a `docutils.document` AST. - - Note this renderer has no dependencies on Sphinx. - """ - - default_block_tokens = ( - block_tokens.HTMLBlock, - myst_block_tokens.LineComment, - block_tokens.BlockCode, - block_tokens.Heading, - myst_block_tokens.Quote, - block_tokens.CodeFence, - block_tokens.ThematicBreak, - myst_block_tokens.BlockBreak, - myst_block_tokens.List, - block_tokens_ext.Table, - block_tokens_ext.Footnote, - block_tokens.LinkDefinition, - myst_block_tokens.Paragraph, - ) - - default_span_tokens = ( - span_tokens.EscapeSequence, - myst_span_tokens.Role, - span_tokens.HTMLSpan, - span_tokens.AutoLink, - myst_span_tokens.Target, - span_tokens.CoreTokens, - span_tokens_ext.FootReference, - span_tokens_ext.Math, - # TODO there is no matching core element in docutils for strikethrough - # span_tokens_ext.Strikethrough, - span_tokens.InlineCode, - span_tokens.LineBreak, - span_tokens.RawText, - ) - - def __init__( - self, - document: Optional[nodes.document] = None, - current_node: Optional[nodes.Element] = None, - config: Optional[dict] = None, - parse_context: Optional[ParseContext] = None, - ): - """Initialise the renderer. - - :param document: The document to populate (or create a new one if None) - :param current_node: The root node from which to begin populating - (default is document, or should be an ancestor of document) - :param config: contains configuration specific to the rendering process - :param parse_context: the parse context stores global parsing variables, - such as the block/span tokens to search for, - and link/footnote definitions that have been collected. - If None, a new context will be instatiated, with the default - block/span tokens for this renderer. - These will be re-instatiated on ``__enter__``. - :type parse_context: mistletoe.parse_context.ParseContext - """ - self.config = config or {} - self.document = document or self.new_document() # type: nodes.document - self.reporter = self.document.reporter # type: Reporter - self.current_node = current_node or self.document # type: nodes.Element - self.language_module = self.document.settings.language_code # type: str - get_language(self.language_module) - self._level_to_elem = {0: self.document} - - super().__init__(parse_context=parse_context) - - def new_document(self, source_path="notset") -> nodes.document: - """Create a new docutils document.""" - settings = OptionParser(components=(RSTParser,)).get_default_values() - return new_document(source_path, settings=settings) - - def add_line_and_source_path(self, node, token): - """Copy the line number and document source path to the docutils node.""" - try: - node.line = token.position.line_start + 1 - except (AttributeError, TypeError): - pass - node.source = self.document["source"] - - def nested_render_text(self, text: str, lineno: int, token): - """Render unparsed text.""" - lines = SourceLines( - text, - start_line=lineno, - uri=self.document["source"], - metadata=token.position.data, - standardize_ends=True, - ) - doc_token = myst_block_tokens.Document.read( - lines, front_matter=True, reset_definitions=False - ) - # TODO think if this is the best way: here we consume front matter, - # but then remove it. this is for example if includes have front matter - doc_token.front_matter = None - # we mark the token as nested so that footnotes etc aren't rendered - doc_token.is_nested = True - self.render(doc_token) - - def render_children(self, token): - for child in token.children: - self.render(child) - - @contextmanager - def current_node_context(self, node, append: bool = False): - """Context manager for temporarily setting the current node.""" - if append: - self.current_node.append(node) - current_node = self.current_node - self.current_node = node - yield - self.current_node = current_node - - def render_document(self, token: block_tokens.Document): - if token.front_matter: - self.render_front_matter(token.front_matter) - self.render_children(token) - - if getattr(token, "is_nested", False): - # if the document is nested in another, we don't want to output footnotes - return self.document - - # we use the footnotes stored in the global context, - # rather than those stored on the document, - # since additional references may have been made in nested parses - footnotes = get_parse_context().foot_definitions - - # we don't use the foot_references stored on the global context, - # since references within directives/roles will have been added after - # those from the initial markdown parse - # instead we gather them from a walk of the created document - # foot_refs = get_parse_context().foot_references - foot_refs = OrderedDict() - for refnode in self.document.traverse(nodes.footnote_reference): - if refnode["refname"] not in foot_refs: - foot_refs[refnode["refname"]] = True - - if foot_refs: - self.current_node.append(nodes.transition()) - for footref in foot_refs: - if footref in footnotes: - self.render_footnote(footnotes[footref]) - - return self.document - - def render_front_matter(self, token): - """Pass document front matter data - - For RST, all field lists are captured by - ``docutils.docutils.parsers.rst.states.Body.field_marker``, - then, if one occurs at the document, it is transformed by - `docutils.docutils.transforms.frontmatter.DocInfo`, and finally - this is intercepted by sphinx and added to the env in - `sphinx.environment.collectors.metadata.MetadataCollector.process_doc` - - So technically the values should be parsed to AST, but this is redundant, - since `process_doc` just converts them back to text. - - """ - try: - data = token.get_data() - except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error: - msg_node = self.reporter.error( - "Front matter block:\n" + str(error), line=token.position.line_start - ) - msg_node += nodes.literal_block(token.content, token.content) - self.current_node += [msg_node] - return - - docinfo = dict_to_docinfo(data) - self.current_node.append(docinfo) - - def render_footnote(self, token: block_tokens_ext.Footnote): - footnote = nodes.footnote() - self.add_line_and_source_path(footnote, token) - # footnote += nodes.label('', token.target) - footnote["names"].append(token.target) - footnote["auto"] = 1 - self.document.note_autofootnote(footnote) - self.document.note_explicit_target(footnote, footnote) - # TODO for now we wrap the content (which are list of spans tokens) - # in a paragraph, but eventually upstream in mistletoe this will already be - # block level tokens - self.current_node.append(footnote) - paragraph = nodes.paragraph("") - self.add_line_and_source_path(paragraph, token) - footnote.append(paragraph) - with self.current_node_context(paragraph, append=False): - self.render_children(token) - - def render_foot_reference(self, token): - """Footnote references are added as auto-numbered, - .i.e. `[^a]` is read as rST `[#a]_` - """ - refnode = nodes.footnote_reference("[^{}]".format(token.target)) - self.add_line_and_source_path(refnode, token) - refnode["auto"] = 1 - refnode["refname"] = token.target - # refnode += nodes.Text(token.target) - self.document.note_autofootnote_ref(refnode) - self.document.note_footnote_ref(refnode) - self.current_node.append(refnode) - - def render_paragraph(self, token): - if len(token.children) == 1 and isinstance( - token.children[0], myst_span_tokens.Target - ): - # promote the target to block level - return self.render_target(token.children[0]) - para = nodes.paragraph("") - self.add_line_and_source_path(para, token) - with self.current_node_context(para, append=True): - self.render_children(token) - - def render_line_comment(self, token): - self.current_node.append(nodes.comment(token.content, token.content)) - - def render_target(self, token): - text = token.children[0].content - name = nodes.fully_normalize_name(text) - target = nodes.target(text) - target["names"].append(name) - self.add_line_and_source_path(target, token) - self.document.note_explicit_target(target, self.current_node) - self.current_node.append(target) - - def render_raw_text(self, token): - text = token.content - self.current_node.append(nodes.Text(text, text)) - - def render_escape_sequence(self, token): - text = token.children[0].content - self.current_node.append(nodes.Text(text, text)) - - def render_line_break(self, token): - if token.soft: - self.current_node.append(nodes.Text("\n")) - else: - self.current_node.append(nodes.raw("", "<br />\n", format="html")) - - def render_strong(self, token): - node = nodes.strong() - self.add_line_and_source_path(node, token) - with self.current_node_context(node, append=True): - self.render_children(token) - - def render_emphasis(self, token): - node = nodes.emphasis() - self.add_line_and_source_path(node, token) - with self.current_node_context(node, append=True): - self.render_children(token) - - def render_quote(self, token): - quote = nodes.block_quote() - self.add_line_and_source_path(quote, token) - with self.current_node_context(quote, append=True): - self.render_children(token) - - def render_strikethrough(self, token): - # TODO there is no existing node/role for this - raise NotImplementedError - - def render_thematic_break(self, token): - node = nodes.transition() - self.add_line_and_source_path(node, token) - self.current_node.append(node) - - def render_block_break(self, token): - block_break = nodes.comment(token.content, token.content) - block_break["classes"] += ["block_break"] - self.add_line_and_source_path(block_break, token) - self.current_node.append(block_break) - - def render_math(self, token): - if token.content.startswith("$$"): - content = token.content[2:-2] - node = nodes.math_block(content, content, nowrap=False, number=None) - else: - content = token.content[1:-1] - node = nodes.math(content, content) - self.add_line_and_source_path(node, token) - self.current_node.append(node) - - def render_block_code(self, token): - # this should never have a language, since it is just indented text, however, - # creating a literal_block with no language will raise a warning in sphinx - text = token.children[0].content - language = token.language or "none" - node = nodes.literal_block(text, text, language=language) - self.add_line_and_source_path(node, token) - self.current_node.append(node) - - def render_code_fence(self, token): - if token.language.startswith("{") and token.language.endswith("}"): - return self.render_directive(token) - - text = token.children[0].content - language = token.language - if not language: - try: - sphinx_env = self.document.settings.env - language = sphinx_env.temp_data.get( - "highlight_language", sphinx_env.config.highlight_language - ) - except AttributeError: - pass - if not language: - language = self.config.get("highlight_language", "") - node = nodes.literal_block(text, text, language=language) - self.add_line_and_source_path(node, token) - self.current_node.append(node) - - def render_inline_code(self, token): - text = token.children[0].content - node = nodes.literal(text, text) - self.add_line_and_source_path(node, token) - self.current_node.append(node) - - def _is_section_level(self, level, section): - return self._level_to_elem.get(level, None) == section - - def _add_section(self, section, level): - parent_level = max( - section_level - for section_level in self._level_to_elem - if level > section_level - ) - parent = self._level_to_elem[parent_level] - parent.append(section) - self._level_to_elem[level] = section - - # Prune level to limit - self._level_to_elem = dict( - (section_level, section) - for section_level, section in self._level_to_elem.items() - if section_level <= level - ) - - def render_heading(self, token): - # Test if we're replacing a section level first - if isinstance(self.current_node, nodes.section): - if self._is_section_level(token.level, self.current_node): - self.current_node = self.current_node.parent - - title_node = nodes.title() - self.add_line_and_source_path(title_node, token) - - new_section = nodes.section() - self.add_line_and_source_path(new_section, token) - new_section.append(title_node) - - self._add_section(new_section, token.level) - - self.current_node = title_node - self.render_children(token) - - assert isinstance(self.current_node, nodes.title) - text = self.current_node.astext() - # if self.translate_section_name: - # text = self.translate_section_name(text) - name = nodes.fully_normalize_name(text) - section = self.current_node.parent - section["names"].append(name) - self.document.note_implicit_target(section, section) - self.current_node = section - - def handle_cross_reference(self, token, destination): - # TODO use the docutils error reporting mechanisms, rather than raising - if not self.config.get("ignore_missing_refs", False): - raise NotImplementedError( - "reference not found in current document: {}\n{}".format( - destination, token - ) - ) - - def render_link(self, token): - ref_node = nodes.reference() - self.add_line_and_source_path(ref_node, token) - # Check destination is supported for cross-linking and remove extension - # TODO escape urls? - destination = token.target - _, ext = splitext(destination) - # TODO check for other supported extensions, such as those specified in - # the Sphinx conf.py file but how to access this information? - # TODO this should probably only remove the extension for local paths, - # i.e. not uri's starting with http or other external prefix. - - # if ext.replace('.', '') in self.supported: - # destination = destination.replace(ext, '') - ref_node["refuri"] = destination - if token.title: - ref_node["title"] = token.title - next_node = ref_node - - url_check = urlparse(destination) - # If there's not a url scheme (e.g. 'https' for 'https:...' links), - # or there is a scheme but it's not in the list of known_url_schemes, - # then assume it's a cross-reference - known_url_schemes = self.config.get("known_url_schemes", None) - if known_url_schemes: - scheme_known = url_check.scheme in known_url_schemes - else: - scheme_known = bool(url_check.scheme) - - if not url_check.fragment and not scheme_known: - self.handle_cross_reference(token, destination) - else: - self.current_node.append(next_node) - with self.current_node_context(ref_node): - self.render_children(token) - - def render_image(self, token): - img_node = nodes.image() - self.add_line_and_source_path(img_node, token) - img_node["uri"] = token.src - - img_node["alt"] = "" - if token.children and isinstance(token.children[0], span_tokens.RawText): - img_node["alt"] = token.children[0].content - token.children[0].content = "" - - self.current_node.append(img_node) - # TODO how should non-raw alternative text be handled? - # with self.set_current_node(img_node): - # self.render_children(token) - - def render_list(self, token): - list_node = None - if token.start_at is not None: - list_node = nodes.enumerated_list() - # TODO deal with token.start_at? - # TODO support numerals/letters for lists - # (see https://stackoverflow.com/a/48372856/5033292) - # See docutils/docutils/parsers/rst/states.py:Body.enumerator - # list_node['enumtype'] = 'arabic', 'loweralpha', 'upperroman', etc. - # list_node['start'] - # list_node['prefix'] - # list_node['suffix'] - else: - list_node = nodes.bullet_list() - # TODO deal with token.loose? - self.add_line_and_source_path(list_node, token) - - self.current_node.append(list_node) - with self.current_node_context(list_node): - self.render_children(token) - - def render_list_item(self, token: myst_block_tokens.ListItem): - item_node = nodes.list_item() - self.add_line_and_source_path(item_node, token) - self.current_node.append(item_node) - with self.current_node_context(item_node): - self.render_children(token) - - def render_table(self, token): - table = nodes.table() - table["classes"] += ["colwidths-auto"] - # TODO column alignment - maxcols = max(len(c.children) for c in token.children) - # TODO are colwidths actually required - colwidths = [100 / maxcols] * maxcols - tgroup = nodes.tgroup(cols=len(colwidths)) - table += tgroup - for colwidth in colwidths: - colspec = nodes.colspec(colwidth=colwidth) - tgroup += colspec - - if hasattr(token, "header"): - thead = nodes.thead() - tgroup += thead - with self.current_node_context(thead): - self.render_table_row(token.header) - - tbody = nodes.tbody() - tgroup += tbody - - with self.current_node_context(tbody): - self.render_children(token) - - self.current_node.append(table) - - def render_table_row(self, token): - row = nodes.row() - with self.current_node_context(row, append=True): - self.render_children(token) - - def render_table_cell(self, token): - entry = nodes.entry() - with self.current_node_context(entry, append=True): - self.render_children(token) - - def render_auto_link(self, token): - if token.mailto: - refuri = "mailto:{}".format(token.target) - else: - refuri = escape_url(token.target) - ref_node = nodes.reference(token.target, token.target, refuri=refuri) - self.add_line_and_source_path(ref_node, token) - self.current_node.append(ref_node) - - def render_html_span(self, token): - self.current_node.append(nodes.raw("", token.content, format="html")) - - def render_html_block(self, token): - self.current_node.append(nodes.raw("", token.content, format="html")) - - def render_role(self, token): - content = token.children[0].content - name = token.role_name - # TODO role name white/black lists - try: - lineno = token.position.line_start - except (AttributeError, TypeError): - lineno = 0 - inliner = MockInliner(self, lineno) - role_func, messages = roles.role( - name, self.language_module, lineno, self.reporter - ) - rawsource = ":{}:`{}`".format(name, content) - # # backslash escapes converted to nulls (``\x00``) - text = span_tokens.EscapeSequence.strip(content) - if role_func: - nodes, messages2 = role_func(name, rawsource, text, lineno, inliner) - # return nodes, messages + messages2 - self.current_node += nodes - else: - message = self.reporter.error( - 'Unknown interpreted text role "{}".'.format(name), line=lineno - ) - # return ([self.problematic(content, content, msg)], messages + [msg]) - problematic = inliner.problematic(text, rawsource, message) - self.current_node += problematic - - def render_directive(self, token): - """Render special fenced code blocks as directives.""" - name = token.language[1:-1] - # TODO directive name white/black lists - content = token.children[0].content - self.document.current_line = token.position.line_start - - # get directive class - directive_class, messages = directives.directive( - name, self.language_module, self.document - ) # type: (Directive, list) - if not directive_class: - error = self.reporter.error( - "Unknown directive type '{}'\n".format(name), - # nodes.literal_block(content, content), - line=token.position.line_start, - ) - self.current_node += [error] + messages - return - - try: - arguments, options, body_lines = parse_directive_text( - directive_class, token.arguments, content - ) - except DirectiveParsingError as error: - error = self.reporter.error( - "Directive '{}':\n{}".format(name, error), - nodes.literal_block(content, content), - line=token.position.line_start, - ) - self.current_node += [error] - return - - # initialise directive - if issubclass(directive_class, Include): - directive_instance = MockIncludeDirective( - self, - name=name, - klass=directive_class, - arguments=arguments, - options=options, - body=body_lines, - token=token, - ) - else: - state_machine = MockStateMachine(self, token.position.line_start) - state = MockState( - self, state_machine, token.position.line_start, token=token - ) - directive_instance = directive_class( - name=name, - # the list of positional arguments - arguments=arguments, - # a dictionary mapping option names to values - options=options, - # the directive content line by line - content=StringList(body_lines, self.document["source"]), - # the absolute line number of the first line of the directive - lineno=token.position.line_start, - # the line offset of the first line of the content - content_offset=0, # TODO get content offset from `parse_directive_text` - # a string containing the entire directive - block_text="\n".join(body_lines), - state=state, - state_machine=state_machine, - ) - - # run directive - try: - result = directive_instance.run() - except DirectiveError as error: - msg_node = self.reporter.system_message( - error.level, error.msg, line=token.position.line_start - ) - msg_node += nodes.literal_block(content, content) - result = [msg_node] - except MockingError as exc: - error = self.reporter.error( - "Directive '{}' cannot be mocked:\n{}: {}".format( - name, exc.__class__.__name__, exc - ), - nodes.literal_block(content, content), - line=token.position.line_start, - ) - self.current_node += [error] - return - assert isinstance( - result, list - ), 'Directive "{}" must return a list of nodes.'.format(name) - for i in range(len(result)): - assert isinstance( - result[i], nodes.Node - ), 'Directive "{}" returned non-Node object (index {}): {}'.format( - name, i, result[i] - ) - self.current_node += result - - -class SphinxRenderer(DocutilsRenderer): - """A mistletoe renderer to populate (in-place) a `docutils.document` AST. - - This is sub-class of `DocutilsRenderer` that handles sphinx cross-referencing. - """ - - def __init__(self, *args, **kwargs): - """Intitalise SphinxRenderer - - :param load_sphinx_env: load a basic sphinx environment, - when using the renderer as a context manager outside if `sphinx-build` - :param sphinx_conf: a dictionary representation of the sphinx `conf.py` - :param sphinx_srcdir: a path to a source directory - (for example, can be used for `include` statements) - - To use this renderer in a 'standalone' fashion:: - - from myst_parser.block_tokens import Document - - with SphinxRenderer(load_sphinx_env=True, sphinx_conf={}) as renderer: - renderer.render(Document.read("source text")) - - """ - self.load_sphinx_env = kwargs.pop("load_sphinx_env", False) - self.sphinx_conf = kwargs.pop("sphinx_conf", None) - self.sphinx_srcdir = kwargs.pop("sphinx_srcdir", None) - super().__init__(*args, **kwargs) - - def handle_cross_reference(self, token, destination): - from sphinx import addnodes - - wrap_node = addnodes.pending_xref( - reftarget=unquote(destination), - reftype="any", - refdomain=None, # Added to enable cross-linking - refexplicit=len(token.children) > 0, - refwarn=True, - ) - self.add_line_and_source_path(wrap_node, token) - if token.title: - wrap_node["title"] = token.title - self.current_node.append(wrap_node) - text_node = nodes.literal("", "", classes=["xref", "any"]) - wrap_node.append(text_node) - with self.current_node_context(text_node): - self.render_children(token) - - def mock_sphinx_env(self, configuration=None, sourcedir=None): - """Create a minimimal Sphinx environment; - loading sphinx roles, directives, etc. - """ - from sphinx.application import builtin_extensions, Sphinx - from sphinx.config import Config - from sphinx.environment import BuildEnvironment - from sphinx.events import EventManager - from sphinx.project import Project - from sphinx.registry import SphinxComponentRegistry - from sphinx.util.tags import Tags - - class MockSphinx(Sphinx): - """Minimal sphinx init to load roles and directives.""" - - def __init__(self, confoverrides=None, srcdir=None): - self.extensions = {} - self.registry = SphinxComponentRegistry() - self.html_themes = {} - self.events = EventManager(self) - self.tags = Tags(None) - self.config = Config({}, confoverrides or {}) - self.config.pre_init_values() - self._init_i18n() - for extension in builtin_extensions: - self.registry.load_extension(self, extension) - # fresh env - self.doctreedir = None - self.srcdir = srcdir - self.confdir = None - self.outdir = None - self.project = Project(srcdir=srcdir, source_suffix=".md") - self.project.docnames = ["mock_docname"] - self.env = BuildEnvironment() - self.env.setup(self) - self.env.temp_data["docname"] = "mock_docname" - self.builder = None - - if not confoverrides: - return - - # this code is only required for more complex parsing with extensions - for extension in self.config.extensions: - self.setup_extension(extension) - buildername = "dummy" - self.preload_builder(buildername) - self.config.init_values() - self.events.emit("config-inited", self.config) - import tempfile - - with tempfile.TemporaryDirectory() as tempdir: - # creating a builder attempts to make the doctreedir - self.doctreedir = tempdir - self.builder = self.create_builder(buildername) - self.doctreedir = None - - app = MockSphinx(confoverrides=configuration, srcdir=sourcedir) - self.document.settings.env = app.env - return app - - def __enter__(self): - """If `load_sphinx_env=True`, we set up an environment, - to parse sphinx roles/directives, outside of a `sphinx-build`. - - This primarily copies the code in `sphinx.util.docutils.docutils_namespace` - and `sphinx.util.docutils.sphinx_domains`. - """ - if not self.load_sphinx_env: - return super().__enter__() - - # store currently loaded roles/directives, so we can revert on exit - self._directives = copy.copy(directives._directives) - self._roles = copy.copy(roles._roles) - # Monkey-patch directive and role dispatch, - # so that sphinx domain-specific markup takes precedence. - self._env = self.mock_sphinx_env( - configuration=self.sphinx_conf, sourcedir=self.sphinx_srcdir - ).env - from sphinx.util.docutils import sphinx_domains - - self._sphinx_domains = sphinx_domains(self._env) - self._sphinx_domains.enable() - - return super().__enter__() - - def __exit__(self, exception_type, exception_val, traceback): - if not self.load_sphinx_env: - return super().__exit__(exception_type, exception_val, traceback) - # revert loaded roles/directives - directives._directives = self._directives - roles._roles = self._roles - self._directives = None - self._roles = None - # unregister nodes (see `sphinx.util.docutils.docutils_namespace`) - from sphinx.util.docutils import additional_nodes, unregister_node - - for node in list(additional_nodes): - unregister_node(node) - additional_nodes.discard(node) - # revert directive/role function (see `sphinx.util.docutils.sphinx_domains`) - self._sphinx_domains.disable() - self._sphinx_domains = None - self._env = None - return super().__exit__(exception_type, exception_val, traceback) - - -class MockingError(Exception): - """An exception to signal an error during mocking of docutils components.""" - - -class MockInliner: - """A mock version of `docutils.parsers.rst.states.Inliner`. - - This is parsed to role functions. - """ - - def __init__(self, renderer: DocutilsRenderer, lineno: int): - self._renderer = renderer - self.document = renderer.document - self.reporter = renderer.document.reporter - if not hasattr(self.reporter, "get_source_and_line"): - # TODO this is called by some roles, - # but I can't see how that would work in RST? - self.reporter.get_source_and_line = lambda l: (self.document["source"], l) - self.parent = renderer.current_node - self.language = renderer.language_module - self.rfc_url = "rfc%d.html" - - def problematic(self, text: str, rawsource: str, message: nodes.system_message): - msgid = self.document.set_id(message, self.parent) - problematic = nodes.problematic(rawsource, rawsource, refid=msgid) - prbid = self.document.set_id(problematic) - message.add_backref(prbid) - return problematic - - # TODO add parse method - - def __getattr__(self, name): - """This method is only be called if the attribute requested has not - been defined. Defined attributes will not be overridden. - """ - # TODO use document.reporter mechanism? - if hasattr(Inliner, name): - msg = "{cls} has not yet implemented attribute '{name}'".format( - cls=type(self).__name__, name=name - ) - raise MockingError(msg).with_traceback(sys.exc_info()[2]) - msg = "{cls} has no attribute {name}".format(cls=type(self).__name__, name=name) - raise MockingError(msg).with_traceback(sys.exc_info()[2]) - - -class MockState: - """A mock version of `docutils.parsers.rst.states.RSTState`. - - This is parsed to the `Directives.run()` method, - so that they may run nested parses on their content that will be parsed as markdown, - rather than RST. - """ - - def __init__( - self, - renderer: DocutilsRenderer, - state_machine: "MockStateMachine", - lineno: int, - token, - ): - self._renderer = renderer - self._lineno = lineno - self._token = token - self.document = renderer.document - self.state_machine = state_machine - - class Struct: - document = self.document - reporter = self.document.reporter - language = self.document.settings.language_code - title_styles = [] - section_level = max(renderer._level_to_elem) - section_bubble_up_kludge = False - inliner = MockInliner(renderer, lineno) - - self.memo = Struct - - def nested_parse( - self, - block: StringList, - input_offset: int, - node: nodes.Element, - match_titles: bool = False, - state_machine_class=None, - state_machine_kwargs=None, - ): - current_match_titles = self.state_machine.match_titles - self.state_machine.match_titles = match_titles - with self._renderer.current_node_context(node): - self._renderer.nested_render_text( - block, self._lineno + input_offset, token=self._token - ) - self.state_machine.match_titles = current_match_titles - - def inline_text(self, text: str, lineno: int): - # TODO return messages? - messages = [] - paragraph = nodes.paragraph("") - # here we instatiate a new renderer, - # so that the nested parse does not effect the current renderer, - # but we use the same global parse context, so that link references, etc - # are added to the global parse. - renderer = self._renderer.__class__( - document=self.document, - current_node=paragraph, - parse_context=get_parse_context(), - ) - lines = SourceLines( - text, - start_line=self._lineno, - uri=self.document["source"], - metadata=self._token.position.data, - standardize_ends=True, - ) - doc_token = myst_block_tokens.Document.read( - lines, front_matter=False, reset_definitions=False - ) - # we mark the token as nested so that footnotes etc aren't rendered - doc_token.is_nested = True - renderer.render(doc_token) - textnodes = [] - if paragraph.children: - # first child should be paragraph - textnodes = paragraph.children[0].children - return textnodes, messages - - # U+2014 is an em-dash: - attribution_pattern = re.compile("^((?:---?(?!-)|\u2014) *)(.+)") - - def block_quote(self, lines: List[str], line_offset: int): - """Parse a block quote, which is a block of text, - followed by an (optional) attribution. - - :: - - No matter where you go, there you are. - - -- Buckaroo Banzai - """ - elements = [] - # split attribution - last_line_blank = False - blockquote_lines = lines - attribution_lines = [] - attribution_line_offset = None - # First line after a blank line must begin with a dash - for i, line in enumerate(lines): - if not line.strip(): - last_line_blank = True - continue - if not last_line_blank: - last_line_blank = False - continue - last_line_blank = False - match = self.attribution_pattern.match(line) - if not match: - continue - attribution_line_offset = i - attribution_lines = [match.group(2)] - for at_line in lines[i + 1 :]: - indented_line = at_line[len(match.group(1)) :] - if len(indented_line) != len(at_line.lstrip()): - break - attribution_lines.append(indented_line) - blockquote_lines = lines[:i] - break - # parse block - blockquote = nodes.block_quote() - self.nested_parse(blockquote_lines, line_offset, blockquote) - elements.append(blockquote) - # parse attribution - if attribution_lines: - attribution_text = "\n".join(attribution_lines) - lineno = self._lineno + line_offset + attribution_line_offset - textnodes, messages = self.inline_text(attribution_text, lineno) - attribution = nodes.attribution(attribution_text, "", *textnodes) - ( - attribution.source, - attribution.line, - ) = self.state_machine.get_source_and_line(lineno) - blockquote += attribution - elements += messages - return elements - - def build_table(self, tabledata, tableline, stub_columns=0, widths=None): - return Body.build_table(self, tabledata, tableline, stub_columns, widths) - - def build_table_row(self, rowdata, tableline): - return Body.build_table_row(self, rowdata, tableline) - - def __getattr__(self, name): - """This method is only be called if the attribute requested has not - been defined. Defined attributes will not be overridden. - """ - if hasattr(Body, name): - msg = "{cls} has not yet implemented attribute '{name}'".format( - cls=type(self).__name__, name=name - ) - raise MockingError(msg).with_traceback(sys.exc_info()[2]) - msg = "{cls} has no attribute {name}".format(cls=type(self).__name__, name=name) - raise MockingError(msg).with_traceback(sys.exc_info()[2]) - - -class MockStateMachine: - """A mock version of `docutils.parsers.rst.states.RSTStateMachine`. - - This is parsed to the `Directives.run()` method. - """ - - def __init__(self, renderer: DocutilsRenderer, lineno: int): - self._renderer = renderer - self._lineno = lineno - self.document = renderer.document - self.reporter = self.document.reporter - self.node = renderer.current_node - self.match_titles = True - - # TODO to allow to access like attributes like input_lines, - # we would need to store the input lines, - # probably via the `Document` token, - # and maybe self._lines = lines[:], then for AstRenderer, - # ignore private attributes - - def get_source(self, lineno: Optional[int] = None): - """Return document source path.""" - return self.document["source"] - - def get_source_and_line(self, lineno: Optional[int] = None): - """Return (source path, line) tuple for current or given line number.""" - return self.document["source"], lineno or self._lineno - - def __getattr__(self, name): - """This method is only be called if the attribute requested has not - been defined. Defined attributes will not be overridden. - """ - if hasattr(RSTStateMachine, name): - msg = "{cls} has not yet implemented attribute '{name}'".format( - cls=type(self).__name__, name=name - ) - raise MockingError(msg).with_traceback(sys.exc_info()[2]) - msg = "{cls} has no attribute {name}".format(cls=type(self).__name__, name=name) - raise MockingError(msg).with_traceback(sys.exc_info()[2]) - - -class MockIncludeDirective: - """This directive uses a lot of statemachine logic that is not yet mocked. - Therefore, we treat it as a special case (at least for now). - - See: - https://docutils.sourceforge.io/docs/ref/rst/directives.html#including-an-external-document-fragment - """ - - def __init__( - self, - renderer: DocutilsRenderer, - name: str, - klass: Include, - arguments: list, - options: dict, - body: List[str], - token, - ): - self.renderer = renderer - self.document = renderer.document - self.name = name - self.klass = klass - self.arguments = arguments - self.options = options - self.body = body - self.lineno = token.position.line_start - self.token = token - - def run(self): - - from docutils.parsers.rst.directives.body import CodeBlock, NumberLines - - if not self.document.settings.file_insertion_enabled: - raise DirectiveError(2, 'Directive "{}" disabled.'.format(self.name)) - - source_dir = Path(self.document["source"]).absolute().parent - include_arg = "".join([s.strip() for s in self.arguments[0].splitlines()]) - - if include_arg.startswith("<") and include_arg.endswith(">"): - # # docutils "standard" includes - path = Path(self.klass.standard_include_path).joinpath(include_arg[1:-1]) - else: - # if using sphinx interpret absolute paths "correctly", - # i.e. relative to source directory - try: - sphinx_env = self.document.settings.env - _, include_arg = sphinx_env.relfn2path(self.arguments[0]) - sphinx_env.note_included(include_arg) - except AttributeError: - pass - path = Path(include_arg) - path = source_dir.joinpath(path) - - # read file - encoding = self.options.get("encoding", self.document.settings.input_encoding) - error_handler = self.document.settings.input_encoding_error_handler - # tab_width = self.options.get("tab-width", self.document.settings.tab_width) - try: - file_content = path.read_text(encoding=encoding, errors=error_handler) - except Exception as error: - raise DirectiveError( - 4, - 'Directive "{}": error reading file: {}\n{error}.'.format( - self.name, path, error - ), - ) - - # get required section of text - startline = self.options.get("start-line", None) - endline = self.options.get("end-line", None) - file_content = "\n".join(file_content.splitlines()[startline:endline]) - startline = startline or 0 - for split_on_type in ["start-after", "end-before"]: - split_on = self.options.get(split_on_type, None) - if not split_on: - continue - split_index = file_content.find(split_on) - if split_index < 0: - raise DirectiveError( - 4, - 'Directive "{}"; option "{}": text not found "{}".'.format( - self.name, split_on_type, split_on - ), - ) - if split_on_type == "start-after": - startline += split_index + len(split_on) - file_content = file_content[split_index + len(split_on) :] - else: - file_content = file_content[:split_index] - - if "literal" in self.options: - literal_block = nodes.literal_block( - file_content, source=str(path), classes=self.options.get("class", []) - ) - literal_block.line = 1 # TODO don;t think this should be 1? - self.add_name(literal_block) - if "number-lines" in self.options: - try: - startline = int(self.options["number-lines"] or 1) - except ValueError: - raise DirectiveError( - 3, ":number-lines: with non-integer " "start value" - ) - endline = startline + len(file_content.splitlines()) - if file_content.endswith("\n"): - file_content = file_content[:-1] - tokens = NumberLines([([], file_content)], startline, endline) - for classes, value in tokens: - if classes: - literal_block += nodes.inline(value, value, classes=classes) - else: - literal_block += nodes.Text(value) - else: - literal_block += nodes.Text(file_content) - return [literal_block] - if "code" in self.options: - self.options["source"] = str(path) - state_machine = MockStateMachine(self.renderer, self.lineno) - state = MockState(self.renderer, state_machine, self.lineno, self.token) - codeblock = CodeBlock( - name=self.name, - arguments=[self.options.pop("code")], - options=self.options, - content=file_content.splitlines(), - lineno=self.lineno, - content_offset=0, - block_text=file_content, - state=state, - state_machine=state_machine, - ) - return codeblock.run() - - # Here we perform a nested render, but temporarily setup the document/reporter - # with the correct document path and lineno for the included file. - source = self.renderer.document["source"] - rsource = self.renderer.reporter.source - line_func = getattr(self.renderer.reporter, "get_source_and_line", None) - try: - self.renderer.document["source"] = str(path) - self.renderer.reporter.source = str(path) - self.renderer.reporter.get_source_and_line = lambda l: (str(path), l) - self.renderer.nested_render_text(file_content, startline, token=self.token) - finally: - self.renderer.document["source"] = source - self.renderer.reporter.source = rsource - if line_func is not None: - self.renderer.reporter.get_source_and_line = line_func - else: - del self.renderer.reporter.get_source_and_line - return [] - - def add_name(self, node): - """Append self.options['name'] to node['names'] if it exists. - - Also normalize the name string and register it as explicit target. - """ - if "name" in self.options: - name = nodes.fully_normalize_name(self.options.pop("name")) - if "name" in node: - del node["name"] - node["names"].append(name) - self.renderer.document.note_explicit_target(node, node) - - -def dict_to_docinfo(data): - """Render a key/val pair as a docutils field node.""" - # TODO this data could be used to support default option values for directives - docinfo = nodes.docinfo() - - # Throw away all non-stringy values - # TODO: support more complex data structures as values - for key, value in data.items(): - if not isinstance(value, (str, int, float)): - continue - value = str(value) - field_node = nodes.field() - field_node.source = value - field_node += nodes.field_name(key, "", nodes.Text(key, key)) - field_node += nodes.field_body(value, nodes.Text(value, value)) - docinfo += field_node - return docinfo From 303c53757057815dec27c368400ff00c32ad2f00 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 06:09:57 +0000 Subject: [PATCH 04/32] Fix tests --- .gitignore | 2 + .pre-commit-config.yaml | 3 +- MANIFEST.in | 1 + myst_parser/cli/benchmark.py | 59 +- myst_parser/cli/spec.md | 9709 +++++++++++++++++ myst_parser/docutils_renderer.py | 83 +- myst_parser/main.py | 9 +- myst_parser/mocking.py | 5 +- tests/test_cli.py | 2 +- .../fixtures/sphinx_directives.md | 1 - .../fixtures/syntax_elements.md | 13 - tests/test_renderers/fixtures/tables.md | 87 + tests/test_renderers/test_fixtures.py | 11 + tests/test_sphinx/sourcedirs/basic/content.md | 2 +- .../test_sphinx_builds/test_basic.html | 21 +- .../test_sphinx_builds/test_basic.xml | 17 + 16 files changed, 9960 insertions(+), 65 deletions(-) create mode 100644 myst_parser/cli/spec.md create mode 100644 tests/test_renderers/fixtures/tables.md diff --git a/.gitignore b/.gitignore index b6e47617..fa271a84 100644 --- a/.gitignore +++ b/.gitignore @@ -127,3 +127,5 @@ dmypy.json # Pyre type checker .pyre/ + +_archive/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 70bd3bd2..a2356283 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,7 +5,8 @@ exclude: > (?x)^( \.vscode/settings\.json| tests/test_commonmark/commonmark\.json| - .*\.xml + .*\.xml| + tests/.*/fixtures/.*\.md )$ repos: diff --git a/MANIFEST.in b/MANIFEST.in index 1aba38f6..b77d9a1b 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1 +1,2 @@ include LICENSE +include myst_parser/cli/spec.md diff --git a/myst_parser/cli/benchmark.py b/myst_parser/cli/benchmark.py index 9603166b..b55ef5ea 100644 --- a/myst_parser/cli/benchmark.py +++ b/myst_parser/cli/benchmark.py @@ -1,18 +1,20 @@ import argparse from importlib import import_module -import os +from pathlib import Path import re from time import perf_counter ALL_PACKAGES = ( "python-markdown:extra", - "mistune", "commonmark.py", "mistletoe", - "myst_parser:html", - "myst_parser:docutils", - "myst_parser:sphinx", + "mistune", + "markdown-it-py", + "myst-parser:sphinx", ) +OPT_PACKAGES = ("myst-parser:html", "myst-parser:docutils") + +DEFAULT_FILE = Path(__file__).parent.joinpath("spec.md") def benchmark(package_name, version=None): @@ -55,27 +57,41 @@ def run_mistletoe(package, text): return package.markdown(text) -@benchmark("myst_parser") +@benchmark("markdown_it") +def run_markdown_it_py(package, text): + md = package.MarkdownIt("commonmark") + return md.render(text) + + +@benchmark("myst_parser.main") def run_myst_parser_html(package, text): - package.parse_text(text, "html") + package.to_html(text) -@benchmark("myst_parser") +@benchmark("myst_parser.main") def run_myst_parser_docutils(package, text): - package.parse_text(text, "docutils", config={"ignore_missing_refs": True}) + package.to_docutils( + text, renderer="docutils", options={"ignore_missing_refs": True} + ) -@benchmark("myst_parser") +@benchmark("myst_parser.main") def run_myst_parser_sphinx(package, text): - package.parse_text(text, "sphinx", load_sphinx_env=True) + package.to_docutils( + text, + renderer="sphinx", + options={"ignore_missing_refs": True}, + in_sphinx_env=True, + ) def run_all(package_names, text, num_parses): prompt = "Running {} test(s) ...".format(len(package_names)) print(prompt) print("=" * len(prompt)) + max_len = max(len(p) for p in package_names) for package_name in package_names: - print(package_name, end=" ") + print(package_name + " " * (max_len - len(package_name)), end=" ") func_name = re.sub(r"[\.\-\:]", "_", package_name.lower()) print( "{:.2f} s".format(globals()["run_{}".format(func_name)](text, num_parses)) @@ -85,14 +101,16 @@ def run_all(package_names, text, num_parses): def main(args=None): parser = argparse.ArgumentParser(description="Run benchmark test.") - parser.add_argument("path", type=str, help="the path to the file to parse") + parser.add_argument( + "-f", "--file", default=None, type=str, help="the path to the file to parse" + ) parser.add_argument( "-n", "--num-parses", metavar="NPARSES", - default=1000, + default=10, type=int, - help="The number of parse iterations (default: 1000)", + help="The number of parse iterations (default: 10)", ) parser.add_argument( "-p", @@ -100,14 +118,13 @@ def main(args=None): action="append", default=[], help="The package(s) to run (use -p multiple times).", - choices=ALL_PACKAGES, + choices=ALL_PACKAGES + OPT_PACKAGES, metavar="PACKAGE_NAME", ) args = parser.parse_args(args) + path = Path(args.file) if args.file else DEFAULT_FILE - assert os.path.exists(args.path), "path does not exist" - print("Test document: {}".format(os.path.basename(args.path))) + assert path.exists(), "path does not exist" + print("Test document: {}".format(path.name)) print("Test iterations: {}".format(args.num_parses)) - with open(args.path, "r") as handle: - text = handle.read() - return run_all(args.package or ALL_PACKAGES, text, args.num_parses) + return run_all(args.package or ALL_PACKAGES, path.read_text(), args.num_parses) diff --git a/myst_parser/cli/spec.md b/myst_parser/cli/spec.md new file mode 100644 index 00000000..b6c45820 --- /dev/null +++ b/myst_parser/cli/spec.md @@ -0,0 +1,9709 @@ +--- +title: CommonMark Spec +author: John MacFarlane +version: 0.29 +date: '2019-04-06' +license: '[CC-BY-SA 4.0](http://creativecommons.org/licenses/by-sa/4.0/)' +... + +# Introduction + +## What is Markdown? + +Markdown is a plain text format for writing structured documents, +based on conventions for indicating formatting in email +and usenet posts. It was developed by John Gruber (with +help from Aaron Swartz) and released in 2004 in the form of a +[syntax description](http://daringfireball.net/projects/markdown/syntax) +and a Perl script (`Markdown.pl`) for converting Markdown to +HTML. In the next decade, dozens of implementations were +developed in many languages. Some extended the original +Markdown syntax with conventions for footnotes, tables, and +other document elements. Some allowed Markdown documents to be +rendered in formats other than HTML. Websites like Reddit, +StackOverflow, and GitHub had millions of people using Markdown. +And Markdown started to be used beyond the web, to author books, +articles, slide shows, letters, and lecture notes. + +What distinguishes Markdown from many other lightweight markup +syntaxes, which are often easier to write, is its readability. +As Gruber writes: + +> The overriding design goal for Markdown's formatting syntax is +> to make it as readable as possible. The idea is that a +> Markdown-formatted document should be publishable as-is, as +> plain text, without looking like it's been marked up with tags +> or formatting instructions. +> (<http://daringfireball.net/projects/markdown/>) + +The point can be illustrated by comparing a sample of +[AsciiDoc](http://www.methods.co.nz/asciidoc/) with +an equivalent sample of Markdown. Here is a sample of +AsciiDoc from the AsciiDoc manual: + +``` +1. List item one. ++ +List item one continued with a second paragraph followed by an +Indented block. ++ +................. +$ ls *.sh +$ mv *.sh ~/tmp +................. ++ +List item continued with a third paragraph. + +2. List item two continued with an open block. ++ +-- +This paragraph is part of the preceding list item. + +a. This list is nested and does not require explicit item +continuation. ++ +This paragraph is part of the preceding list item. + +b. List item b. + +This paragraph belongs to item two of the outer list. +-- +``` + +And here is the equivalent in Markdown: +``` +1. List item one. + + List item one continued with a second paragraph followed by an + Indented block. + + $ ls *.sh + $ mv *.sh ~/tmp + + List item continued with a third paragraph. + +2. List item two continued with an open block. + + This paragraph is part of the preceding list item. + + 1. This list is nested and does not require explicit item continuation. + + This paragraph is part of the preceding list item. + + 2. List item b. + + This paragraph belongs to item two of the outer list. +``` + +The AsciiDoc version is, arguably, easier to write. You don't need +to worry about indentation. But the Markdown version is much easier +to read. The nesting of list items is apparent to the eye in the +source, not just in the processed document. + +## Why is a spec needed? + +John Gruber's [canonical description of Markdown's +syntax](http://daringfireball.net/projects/markdown/syntax) +does not specify the syntax unambiguously. Here are some examples of +questions it does not answer: + +1. How much indentation is needed for a sublist? The spec says that + continuation paragraphs need to be indented four spaces, but is + not fully explicit about sublists. It is natural to think that + they, too, must be indented four spaces, but `Markdown.pl` does + not require that. This is hardly a "corner case," and divergences + between implementations on this issue often lead to surprises for + users in real documents. (See [this comment by John + Gruber](http://article.gmane.org/gmane.text.markdown.general/1997).) + +2. Is a blank line needed before a block quote or heading? + Most implementations do not require the blank line. However, + this can lead to unexpected results in hard-wrapped text, and + also to ambiguities in parsing (note that some implementations + put the heading inside the blockquote, while others do not). + (John Gruber has also spoken [in favor of requiring the blank + lines](http://article.gmane.org/gmane.text.markdown.general/2146).) + +3. Is a blank line needed before an indented code block? + (`Markdown.pl` requires it, but this is not mentioned in the + documentation, and some implementations do not require it.) + + ``` markdown + paragraph + code? + ``` + +4. What is the exact rule for determining when list items get + wrapped in `<p>` tags? Can a list be partially "loose" and partially + "tight"? What should we do with a list like this? + + ``` markdown + 1. one + + 2. two + 3. three + ``` + + Or this? + + ``` markdown + 1. one + - a + + - b + 2. two + ``` + + (There are some relevant comments by John Gruber + [here](http://article.gmane.org/gmane.text.markdown.general/2554).) + +5. Can list markers be indented? Can ordered list markers be right-aligned? + + ``` markdown + 8. item 1 + 9. item 2 + 10. item 2a + ``` + +6. Is this one list with a thematic break in its second item, + or two lists separated by a thematic break? + + ``` markdown + * a + * * * * * + * b + ``` + +7. When list markers change from numbers to bullets, do we have + two lists or one? (The Markdown syntax description suggests two, + but the perl scripts and many other implementations produce one.) + + ``` markdown + 1. fee + 2. fie + - foe + - fum + ``` + +8. What are the precedence rules for the markers of inline structure? + For example, is the following a valid link, or does the code span + take precedence ? + + ``` markdown + [a backtick (`)](/url) and [another backtick (`)](/url). + ``` + +9. What are the precedence rules for markers of emphasis and strong + emphasis? For example, how should the following be parsed? + + ``` markdown + *foo *bar* baz* + ``` + +10. What are the precedence rules between block-level and inline-level + structure? For example, how should the following be parsed? + + ``` markdown + - `a long code span can contain a hyphen like this + - and it can screw things up` + ``` + +11. Can list items include section headings? (`Markdown.pl` does not + allow this, but does allow blockquotes to include headings.) + + ``` markdown + - # Heading + ``` + +12. Can list items be empty? + + ``` markdown + * a + * + * b + ``` + +13. Can link references be defined inside block quotes or list items? + + ``` markdown + > Blockquote [foo]. + > + > [foo]: /url + ``` + +14. If there are multiple definitions for the same reference, which takes + precedence? + + ``` markdown + [foo]: /url1 + [foo]: /url2 + + [foo][] + ``` + +In the absence of a spec, early implementers consulted `Markdown.pl` +to resolve these ambiguities. But `Markdown.pl` was quite buggy, and +gave manifestly bad results in many cases, so it was not a +satisfactory replacement for a spec. + +Because there is no unambiguous spec, implementations have diverged +considerably. As a result, users are often surprised to find that +a document that renders one way on one system (say, a GitHub wiki) +renders differently on another (say, converting to docbook using +pandoc). To make matters worse, because nothing in Markdown counts +as a "syntax error," the divergence often isn't discovered right away. + +## About this document + +This document attempts to specify Markdown syntax unambiguously. +It contains many examples with side-by-side Markdown and +HTML. These are intended to double as conformance tests. An +accompanying script `spec_tests.py` can be used to run the tests +against any Markdown program: + + python test/spec_tests.py --spec spec.txt --program PROGRAM + +Since this document describes how Markdown is to be parsed into +an abstract syntax tree, it would have made sense to use an abstract +representation of the syntax tree instead of HTML. But HTML is capable +of representing the structural distinctions we need to make, and the +choice of HTML for the tests makes it possible to run the tests against +an implementation without writing an abstract syntax tree renderer. + +This document is generated from a text file, `spec.txt`, written +in Markdown with a small extension for the side-by-side tests. +The script `tools/makespec.py` can be used to convert `spec.txt` into +HTML or CommonMark (which can then be converted into other formats). + +In the examples, the `→` character is used to represent tabs. + +# Preliminaries + +## Characters and lines + +Any sequence of [characters] is a valid CommonMark +document. + +A [character](@) is a Unicode code point. Although some +code points (for example, combining accents) do not correspond to +characters in an intuitive sense, all code points count as characters +for purposes of this spec. + +This spec does not specify an encoding; it thinks of lines as composed +of [characters] rather than bytes. A conforming parser may be limited +to a certain encoding. + +A [line](@) is a sequence of zero or more [characters] +other than newline (`U+000A`) or carriage return (`U+000D`), +followed by a [line ending] or by the end of file. + +A [line ending](@) is a newline (`U+000A`), a carriage return +(`U+000D`) not followed by a newline, or a carriage return and a +following newline. + +A line containing no characters, or a line containing only spaces +(`U+0020`) or tabs (`U+0009`), is called a [blank line](@). + +The following definitions of character classes will be used in this spec: + +A [whitespace character](@) is a space +(`U+0020`), tab (`U+0009`), newline (`U+000A`), line tabulation (`U+000B`), +form feed (`U+000C`), or carriage return (`U+000D`). + +[Whitespace](@) is a sequence of one or more [whitespace +characters]. + +A [Unicode whitespace character](@) is +any code point in the Unicode `Zs` general category, or a tab (`U+0009`), +carriage return (`U+000D`), newline (`U+000A`), or form feed +(`U+000C`). + +[Unicode whitespace](@) is a sequence of one +or more [Unicode whitespace characters]. + +A [space](@) is `U+0020`. + +A [non-whitespace character](@) is any character +that is not a [whitespace character]. + +An [ASCII punctuation character](@) +is `!`, `"`, `#`, `$`, `%`, `&`, `'`, `(`, `)`, +`*`, `+`, `,`, `-`, `.`, `/` (U+0021–2F), +`:`, `;`, `<`, `=`, `>`, `?`, `@` (U+003A–0040), +`[`, `\`, `]`, `^`, `_`, `` ` `` (U+005B–0060), +`{`, `|`, `}`, or `~` (U+007B–007E). + +A [punctuation character](@) is an [ASCII +punctuation character] or anything in +the general Unicode categories `Pc`, `Pd`, `Pe`, `Pf`, `Pi`, `Po`, or `Ps`. + +## Tabs + +Tabs in lines are not expanded to [spaces]. However, +in contexts where whitespace helps to define block structure, +tabs behave as if they were replaced by spaces with a tab stop +of 4 characters. + +Thus, for example, a tab can be used instead of four spaces +in an indented code block. (Note, however, that internal +tabs are passed through as literal tabs, not expanded to +spaces.) + +```````````````````````````````` example +→foo→baz→→bim +. +<pre><code>foo→baz→→bim +</code></pre> +```````````````````````````````` + +```````````````````````````````` example + →foo→baz→→bim +. +<pre><code>foo→baz→→bim +</code></pre> +```````````````````````````````` + +```````````````````````````````` example + a→a + ὐ→a +. +<pre><code>a→a +ὐ→a +</code></pre> +```````````````````````````````` + +In the following example, a continuation paragraph of a list +item is indented with a tab; this has exactly the same effect +as indentation with four spaces would: + +```````````````````````````````` example + - foo + +→bar +. +<ul> +<li> +<p>foo</p> +<p>bar</p> +</li> +</ul> +```````````````````````````````` + +```````````````````````````````` example +- foo + +→→bar +. +<ul> +<li> +<p>foo</p> +<pre><code> bar +</code></pre> +</li> +</ul> +```````````````````````````````` + +Normally the `>` that begins a block quote may be followed +optionally by a space, which is not considered part of the +content. In the following case `>` is followed by a tab, +which is treated as if it were expanded into three spaces. +Since one of these spaces is considered part of the +delimiter, `foo` is considered to be indented six spaces +inside the block quote context, so we get an indented +code block starting with two spaces. + +```````````````````````````````` example +>→→foo +. +<blockquote> +<pre><code> foo +</code></pre> +</blockquote> +```````````````````````````````` + +```````````````````````````````` example +-→→foo +. +<ul> +<li> +<pre><code> foo +</code></pre> +</li> +</ul> +```````````````````````````````` + + +```````````````````````````````` example + foo +→bar +. +<pre><code>foo +bar +</code></pre> +```````````````````````````````` + +```````````````````````````````` example + - foo + - bar +→ - baz +. +<ul> +<li>foo +<ul> +<li>bar +<ul> +<li>baz</li> +</ul> +</li> +</ul> +</li> +</ul> +```````````````````````````````` + +```````````````````````````````` example +#→Foo +. +<h1>Foo</h1> +```````````````````````````````` + +```````````````````````````````` example +*→*→*→ +. +<hr /> +```````````````````````````````` + + +## Insecure characters + +For security reasons, the Unicode character `U+0000` must be replaced +with the REPLACEMENT CHARACTER (`U+FFFD`). + +# Blocks and inlines + +We can think of a document as a sequence of +[blocks](@)---structural elements like paragraphs, block +quotations, lists, headings, rules, and code blocks. Some blocks (like +block quotes and list items) contain other blocks; others (like +headings and paragraphs) contain [inline](@) content---text, +links, emphasized text, images, code spans, and so on. + +## Precedence + +Indicators of block structure always take precedence over indicators +of inline structure. So, for example, the following is a list with +two items, not a list with one item containing a code span: + +```````````````````````````````` example +- `one +- two` +. +<ul> +<li>`one</li> +<li>two`</li> +</ul> +```````````````````````````````` + + +This means that parsing can proceed in two steps: first, the block +structure of the document can be discerned; second, text lines inside +paragraphs, headings, and other block constructs can be parsed for inline +structure. The second step requires information about link reference +definitions that will be available only at the end of the first +step. Note that the first step requires processing lines in sequence, +but the second can be parallelized, since the inline parsing of +one block element does not affect the inline parsing of any other. + +## Container blocks and leaf blocks + +We can divide blocks into two types: +[container blocks](@), +which can contain other blocks, and [leaf blocks](@), +which cannot. + +# Leaf blocks + +This section describes the different kinds of leaf block that make up a +Markdown document. + +## Thematic breaks + +A line consisting of 0-3 spaces of indentation, followed by a sequence +of three or more matching `-`, `_`, or `*` characters, each followed +optionally by any number of spaces or tabs, forms a +[thematic break](@). + +```````````````````````````````` example +*** +--- +___ +. +<hr /> +<hr /> +<hr /> +```````````````````````````````` + + +Wrong characters: + +```````````````````````````````` example ++++ +. +<p>+++</p> +```````````````````````````````` + + +```````````````````````````````` example +=== +. +<p>===</p> +```````````````````````````````` + + +Not enough characters: + +```````````````````````````````` example +-- +** +__ +. +<p>-- +** +__</p> +```````````````````````````````` + + +One to three spaces indent are allowed: + +```````````````````````````````` example + *** + *** + *** +. +<hr /> +<hr /> +<hr /> +```````````````````````````````` + + +Four spaces is too many: + +```````````````````````````````` example + *** +. +<pre><code>*** +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example +Foo + *** +. +<p>Foo +***</p> +```````````````````````````````` + + +More than three characters may be used: + +```````````````````````````````` example +_____________________________________ +. +<hr /> +```````````````````````````````` + + +Spaces are allowed between the characters: + +```````````````````````````````` example + - - - +. +<hr /> +```````````````````````````````` + + +```````````````````````````````` example + ** * ** * ** * ** +. +<hr /> +```````````````````````````````` + + +```````````````````````````````` example +- - - - +. +<hr /> +```````````````````````````````` + + +Spaces are allowed at the end: + +```````````````````````````````` example +- - - - +. +<hr /> +```````````````````````````````` + + +However, no other characters may occur in the line: + +```````````````````````````````` example +_ _ _ _ a + +a------ + +---a--- +. +<p>_ _ _ _ a</p> +<p>a------</p> +<p>---a---</p> +```````````````````````````````` + + +It is required that all of the [non-whitespace characters] be the same. +So, this is not a thematic break: + +```````````````````````````````` example + *-* +. +<p><em>-</em></p> +```````````````````````````````` + + +Thematic breaks do not need blank lines before or after: + +```````````````````````````````` example +- foo +*** +- bar +. +<ul> +<li>foo</li> +</ul> +<hr /> +<ul> +<li>bar</li> +</ul> +```````````````````````````````` + + +Thematic breaks can interrupt a paragraph: + +```````````````````````````````` example +Foo +*** +bar +. +<p>Foo</p> +<hr /> +<p>bar</p> +```````````````````````````````` + + +If a line of dashes that meets the above conditions for being a +thematic break could also be interpreted as the underline of a [setext +heading], the interpretation as a +[setext heading] takes precedence. Thus, for example, +this is a setext heading, not a paragraph followed by a thematic break: + +```````````````````````````````` example +Foo +--- +bar +. +<h2>Foo</h2> +<p>bar</p> +```````````````````````````````` + + +When both a thematic break and a list item are possible +interpretations of a line, the thematic break takes precedence: + +```````````````````````````````` example +* Foo +* * * +* Bar +. +<ul> +<li>Foo</li> +</ul> +<hr /> +<ul> +<li>Bar</li> +</ul> +```````````````````````````````` + + +If you want a thematic break in a list item, use a different bullet: + +```````````````````````````````` example +- Foo +- * * * +. +<ul> +<li>Foo</li> +<li> +<hr /> +</li> +</ul> +```````````````````````````````` + + +## ATX headings + +An [ATX heading](@) +consists of a string of characters, parsed as inline content, between an +opening sequence of 1--6 unescaped `#` characters and an optional +closing sequence of any number of unescaped `#` characters. +The opening sequence of `#` characters must be followed by a +[space] or by the end of line. The optional closing sequence of `#`s must be +preceded by a [space] and may be followed by spaces only. The opening +`#` character may be indented 0-3 spaces. The raw contents of the +heading are stripped of leading and trailing spaces before being parsed +as inline content. The heading level is equal to the number of `#` +characters in the opening sequence. + +Simple headings: + +```````````````````````````````` example +# foo +## foo +### foo +#### foo +##### foo +###### foo +. +<h1>foo</h1> +<h2>foo</h2> +<h3>foo</h3> +<h4>foo</h4> +<h5>foo</h5> +<h6>foo</h6> +```````````````````````````````` + + +More than six `#` characters is not a heading: + +```````````````````````````````` example +####### foo +. +<p>####### foo</p> +```````````````````````````````` + + +At least one space is required between the `#` characters and the +heading's contents, unless the heading is empty. Note that many +implementations currently do not require the space. However, the +space was required by the +[original ATX implementation](http://www.aaronsw.com/2002/atx/atx.py), +and it helps prevent things like the following from being parsed as +headings: + +```````````````````````````````` example +#5 bolt + +#hashtag +. +<p>#5 bolt</p> +<p>#hashtag</p> +```````````````````````````````` + + +This is not a heading, because the first `#` is escaped: + +```````````````````````````````` example +\## foo +. +<p>## foo</p> +```````````````````````````````` + + +Contents are parsed as inlines: + +```````````````````````````````` example +# foo *bar* \*baz\* +. +<h1>foo <em>bar</em> *baz*</h1> +```````````````````````````````` + + +Leading and trailing [whitespace] is ignored in parsing inline content: + +```````````````````````````````` example +# foo +. +<h1>foo</h1> +```````````````````````````````` + + +One to three spaces indentation are allowed: + +```````````````````````````````` example + ### foo + ## foo + # foo +. +<h3>foo</h3> +<h2>foo</h2> +<h1>foo</h1> +```````````````````````````````` + + +Four spaces are too much: + +```````````````````````````````` example + # foo +. +<pre><code># foo +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example +foo + # bar +. +<p>foo +# bar</p> +```````````````````````````````` + + +A closing sequence of `#` characters is optional: + +```````````````````````````````` example +## foo ## + ### bar ### +. +<h2>foo</h2> +<h3>bar</h3> +```````````````````````````````` + + +It need not be the same length as the opening sequence: + +```````````````````````````````` example +# foo ################################## +##### foo ## +. +<h1>foo</h1> +<h5>foo</h5> +```````````````````````````````` + + +Spaces are allowed after the closing sequence: + +```````````````````````````````` example +### foo ### +. +<h3>foo</h3> +```````````````````````````````` + + +A sequence of `#` characters with anything but [spaces] following it +is not a closing sequence, but counts as part of the contents of the +heading: + +```````````````````````````````` example +### foo ### b +. +<h3>foo ### b</h3> +```````````````````````````````` + + +The closing sequence must be preceded by a space: + +```````````````````````````````` example +# foo# +. +<h1>foo#</h1> +```````````````````````````````` + + +Backslash-escaped `#` characters do not count as part +of the closing sequence: + +```````````````````````````````` example +### foo \### +## foo #\## +# foo \# +. +<h3>foo ###</h3> +<h2>foo ###</h2> +<h1>foo #</h1> +```````````````````````````````` + + +ATX headings need not be separated from surrounding content by blank +lines, and they can interrupt paragraphs: + +```````````````````````````````` example +**** +## foo +**** +. +<hr /> +<h2>foo</h2> +<hr /> +```````````````````````````````` + + +```````````````````````````````` example +Foo bar +# baz +Bar foo +. +<p>Foo bar</p> +<h1>baz</h1> +<p>Bar foo</p> +```````````````````````````````` + + +ATX headings can be empty: + +```````````````````````````````` example +## +# +### ### +. +<h2></h2> +<h1></h1> +<h3></h3> +```````````````````````````````` + + +## Setext headings + +A [setext heading](@) consists of one or more +lines of text, each containing at least one [non-whitespace +character], with no more than 3 spaces indentation, followed by +a [setext heading underline]. The lines of text must be such +that, were they not followed by the setext heading underline, +they would be interpreted as a paragraph: they cannot be +interpretable as a [code fence], [ATX heading][ATX headings], +[block quote][block quotes], [thematic break][thematic breaks], +[list item][list items], or [HTML block][HTML blocks]. + +A [setext heading underline](@) is a sequence of +`=` characters or a sequence of `-` characters, with no more than 3 +spaces indentation and any number of trailing spaces. If a line +containing a single `-` can be interpreted as an +empty [list items], it should be interpreted this way +and not as a [setext heading underline]. + +The heading is a level 1 heading if `=` characters are used in +the [setext heading underline], and a level 2 heading if `-` +characters are used. The contents of the heading are the result +of parsing the preceding lines of text as CommonMark inline +content. + +In general, a setext heading need not be preceded or followed by a +blank line. However, it cannot interrupt a paragraph, so when a +setext heading comes after a paragraph, a blank line is needed between +them. + +Simple examples: + +```````````````````````````````` example +Foo *bar* +========= + +Foo *bar* +--------- +. +<h1>Foo <em>bar</em></h1> +<h2>Foo <em>bar</em></h2> +```````````````````````````````` + + +The content of the header may span more than one line: + +```````````````````````````````` example +Foo *bar +baz* +==== +. +<h1>Foo <em>bar +baz</em></h1> +```````````````````````````````` + +The contents are the result of parsing the headings's raw +content as inlines. The heading's raw content is formed by +concatenating the lines and removing initial and final +[whitespace]. + +```````````````````````````````` example + Foo *bar +baz*→ +==== +. +<h1>Foo <em>bar +baz</em></h1> +```````````````````````````````` + + +The underlining can be any length: + +```````````````````````````````` example +Foo +------------------------- + +Foo += +. +<h2>Foo</h2> +<h1>Foo</h1> +```````````````````````````````` + + +The heading content can be indented up to three spaces, and need +not line up with the underlining: + +```````````````````````````````` example + Foo +--- + + Foo +----- + + Foo + === +. +<h2>Foo</h2> +<h2>Foo</h2> +<h1>Foo</h1> +```````````````````````````````` + + +Four spaces indent is too much: + +```````````````````````````````` example + Foo + --- + + Foo +--- +. +<pre><code>Foo +--- + +Foo +</code></pre> +<hr /> +```````````````````````````````` + + +The setext heading underline can be indented up to three spaces, and +may have trailing spaces: + +```````````````````````````````` example +Foo + ---- +. +<h2>Foo</h2> +```````````````````````````````` + + +Four spaces is too much: + +```````````````````````````````` example +Foo + --- +. +<p>Foo +---</p> +```````````````````````````````` + + +The setext heading underline cannot contain internal spaces: + +```````````````````````````````` example +Foo += = + +Foo +--- - +. +<p>Foo += =</p> +<p>Foo</p> +<hr /> +```````````````````````````````` + + +Trailing spaces in the content line do not cause a line break: + +```````````````````````````````` example +Foo +----- +. +<h2>Foo</h2> +```````````````````````````````` + + +Nor does a backslash at the end: + +```````````````````````````````` example +Foo\ +---- +. +<h2>Foo\</h2> +```````````````````````````````` + + +Since indicators of block structure take precedence over +indicators of inline structure, the following are setext headings: + +```````````````````````````````` example +`Foo +---- +` + +<a title="a lot +--- +of dashes"/> +. +<h2>`Foo</h2> +<p>`</p> +<h2><a title="a lot</h2> +<p>of dashes"/></p> +```````````````````````````````` + + +The setext heading underline cannot be a [lazy continuation +line] in a list item or block quote: + +```````````````````````````````` example +> Foo +--- +. +<blockquote> +<p>Foo</p> +</blockquote> +<hr /> +```````````````````````````````` + + +```````````````````````````````` example +> foo +bar +=== +. +<blockquote> +<p>foo +bar +===</p> +</blockquote> +```````````````````````````````` + + +```````````````````````````````` example +- Foo +--- +. +<ul> +<li>Foo</li> +</ul> +<hr /> +```````````````````````````````` + + +A blank line is needed between a paragraph and a following +setext heading, since otherwise the paragraph becomes part +of the heading's content: + +```````````````````````````````` example +Foo +Bar +--- +. +<h2>Foo +Bar</h2> +```````````````````````````````` + + +But in general a blank line is not required before or after +setext headings: + +```````````````````````````````` example +--- +Foo +--- +Bar +--- +Baz +. +<hr /> +<h2>Foo</h2> +<h2>Bar</h2> +<p>Baz</p> +```````````````````````````````` + + +Setext headings cannot be empty: + +```````````````````````````````` example + +==== +. +<p>====</p> +```````````````````````````````` + + +Setext heading text lines must not be interpretable as block +constructs other than paragraphs. So, the line of dashes +in these examples gets interpreted as a thematic break: + +```````````````````````````````` example +--- +--- +. +<hr /> +<hr /> +```````````````````````````````` + + +```````````````````````````````` example +- foo +----- +. +<ul> +<li>foo</li> +</ul> +<hr /> +```````````````````````````````` + + +```````````````````````````````` example + foo +--- +. +<pre><code>foo +</code></pre> +<hr /> +```````````````````````````````` + + +```````````````````````````````` example +> foo +----- +. +<blockquote> +<p>foo</p> +</blockquote> +<hr /> +```````````````````````````````` + + +If you want a heading with `> foo` as its literal text, you can +use backslash escapes: + +```````````````````````````````` example +\> foo +------ +. +<h2>> foo</h2> +```````````````````````````````` + + +**Compatibility note:** Most existing Markdown implementations +do not allow the text of setext headings to span multiple lines. +But there is no consensus about how to interpret + +``` markdown +Foo +bar +--- +baz +``` + +One can find four different interpretations: + +1. paragraph "Foo", heading "bar", paragraph "baz" +2. paragraph "Foo bar", thematic break, paragraph "baz" +3. paragraph "Foo bar --- baz" +4. heading "Foo bar", paragraph "baz" + +We find interpretation 4 most natural, and interpretation 4 +increases the expressive power of CommonMark, by allowing +multiline headings. Authors who want interpretation 1 can +put a blank line after the first paragraph: + +```````````````````````````````` example +Foo + +bar +--- +baz +. +<p>Foo</p> +<h2>bar</h2> +<p>baz</p> +```````````````````````````````` + + +Authors who want interpretation 2 can put blank lines around +the thematic break, + +```````````````````````````````` example +Foo +bar + +--- + +baz +. +<p>Foo +bar</p> +<hr /> +<p>baz</p> +```````````````````````````````` + + +or use a thematic break that cannot count as a [setext heading +underline], such as + +```````````````````````````````` example +Foo +bar +* * * +baz +. +<p>Foo +bar</p> +<hr /> +<p>baz</p> +```````````````````````````````` + + +Authors who want interpretation 3 can use backslash escapes: + +```````````````````````````````` example +Foo +bar +\--- +baz +. +<p>Foo +bar +--- +baz</p> +```````````````````````````````` + + +## Indented code blocks + +An [indented code block](@) is composed of one or more +[indented chunks] separated by blank lines. +An [indented chunk](@) is a sequence of non-blank lines, +each indented four or more spaces. The contents of the code block are +the literal contents of the lines, including trailing +[line endings], minus four spaces of indentation. +An indented code block has no [info string]. + +An indented code block cannot interrupt a paragraph, so there must be +a blank line between a paragraph and a following indented code block. +(A blank line is not needed, however, between a code block and a following +paragraph.) + +```````````````````````````````` example + a simple + indented code block +. +<pre><code>a simple + indented code block +</code></pre> +```````````````````````````````` + + +If there is any ambiguity between an interpretation of indentation +as a code block and as indicating that material belongs to a [list +item][list items], the list item interpretation takes precedence: + +```````````````````````````````` example + - foo + + bar +. +<ul> +<li> +<p>foo</p> +<p>bar</p> +</li> +</ul> +```````````````````````````````` + + +```````````````````````````````` example +1. foo + + - bar +. +<ol> +<li> +<p>foo</p> +<ul> +<li>bar</li> +</ul> +</li> +</ol> +```````````````````````````````` + + + +The contents of a code block are literal text, and do not get parsed +as Markdown: + +```````````````````````````````` example + <a/> + *hi* + + - one +. +<pre><code><a/> +*hi* + +- one +</code></pre> +```````````````````````````````` + + +Here we have three chunks separated by blank lines: + +```````````````````````````````` example + chunk1 + + chunk2 + + + + chunk3 +. +<pre><code>chunk1 + +chunk2 + + + +chunk3 +</code></pre> +```````````````````````````````` + + +Any initial spaces beyond four will be included in the content, even +in interior blank lines: + +```````````````````````````````` example + chunk1 + + chunk2 +. +<pre><code>chunk1 + + chunk2 +</code></pre> +```````````````````````````````` + + +An indented code block cannot interrupt a paragraph. (This +allows hanging indents and the like.) + +```````````````````````````````` example +Foo + bar + +. +<p>Foo +bar</p> +```````````````````````````````` + + +However, any non-blank line with fewer than four leading spaces ends +the code block immediately. So a paragraph may occur immediately +after indented code: + +```````````````````````````````` example + foo +bar +. +<pre><code>foo +</code></pre> +<p>bar</p> +```````````````````````````````` + + +And indented code can occur immediately before and after other kinds of +blocks: + +```````````````````````````````` example +# Heading + foo +Heading +------ + foo +---- +. +<h1>Heading</h1> +<pre><code>foo +</code></pre> +<h2>Heading</h2> +<pre><code>foo +</code></pre> +<hr /> +```````````````````````````````` + + +The first line can be indented more than four spaces: + +```````````````````````````````` example + foo + bar +. +<pre><code> foo +bar +</code></pre> +```````````````````````````````` + + +Blank lines preceding or following an indented code block +are not included in it: + +```````````````````````````````` example + + + foo + + +. +<pre><code>foo +</code></pre> +```````````````````````````````` + + +Trailing spaces are included in the code block's content: + +```````````````````````````````` example + foo +. +<pre><code>foo +</code></pre> +```````````````````````````````` + + + +## Fenced code blocks + +A [code fence](@) is a sequence +of at least three consecutive backtick characters (`` ` ``) or +tildes (`~`). (Tildes and backticks cannot be mixed.) +A [fenced code block](@) +begins with a code fence, indented no more than three spaces. + +The line with the opening code fence may optionally contain some text +following the code fence; this is trimmed of leading and trailing +whitespace and called the [info string](@). If the [info string] comes +after a backtick fence, it may not contain any backtick +characters. (The reason for this restriction is that otherwise +some inline code would be incorrectly interpreted as the +beginning of a fenced code block.) + +The content of the code block consists of all subsequent lines, until +a closing [code fence] of the same type as the code block +began with (backticks or tildes), and with at least as many backticks +or tildes as the opening code fence. If the leading code fence is +indented N spaces, then up to N spaces of indentation are removed from +each line of the content (if present). (If a content line is not +indented, it is preserved unchanged. If it is indented less than N +spaces, all of the indentation is removed.) + +The closing code fence may be indented up to three spaces, and may be +followed only by spaces, which are ignored. If the end of the +containing block (or document) is reached and no closing code fence +has been found, the code block contains all of the lines after the +opening code fence until the end of the containing block (or +document). (An alternative spec would require backtracking in the +event that a closing code fence is not found. But this makes parsing +much less efficient, and there seems to be no real down side to the +behavior described here.) + +A fenced code block may interrupt a paragraph, and does not require +a blank line either before or after. + +The content of a code fence is treated as literal text, not parsed +as inlines. The first word of the [info string] is typically used to +specify the language of the code sample, and rendered in the `class` +attribute of the `code` tag. However, this spec does not mandate any +particular treatment of the [info string]. + +Here is a simple example with backticks: + +```````````````````````````````` example +``` +< + > +``` +. +<pre><code>< + > +</code></pre> +```````````````````````````````` + + +With tildes: + +```````````````````````````````` example +~~~ +< + > +~~~ +. +<pre><code>< + > +</code></pre> +```````````````````````````````` + +Fewer than three backticks is not enough: + +```````````````````````````````` example +`` +foo +`` +. +<p><code>foo</code></p> +```````````````````````````````` + +The closing code fence must use the same character as the opening +fence: + +```````````````````````````````` example +``` +aaa +~~~ +``` +. +<pre><code>aaa +~~~ +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example +~~~ +aaa +``` +~~~ +. +<pre><code>aaa +``` +</code></pre> +```````````````````````````````` + + +The closing code fence must be at least as long as the opening fence: + +```````````````````````````````` example +```` +aaa +``` +`````` +. +<pre><code>aaa +``` +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example +~~~~ +aaa +~~~ +~~~~ +. +<pre><code>aaa +~~~ +</code></pre> +```````````````````````````````` + + +Unclosed code blocks are closed by the end of the document +(or the enclosing [block quote][block quotes] or [list item][list items]): + +```````````````````````````````` example +``` +. +<pre><code></code></pre> +```````````````````````````````` + + +```````````````````````````````` example +````` + +``` +aaa +. +<pre><code> +``` +aaa +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example +> ``` +> aaa + +bbb +. +<blockquote> +<pre><code>aaa +</code></pre> +</blockquote> +<p>bbb</p> +```````````````````````````````` + + +A code block can have all empty lines as its content: + +```````````````````````````````` example +``` + + +``` +. +<pre><code> + +</code></pre> +```````````````````````````````` + + +A code block can be empty: + +```````````````````````````````` example +``` +``` +. +<pre><code></code></pre> +```````````````````````````````` + + +Fences can be indented. If the opening fence is indented, +content lines will have equivalent opening indentation removed, +if present: + +```````````````````````````````` example + ``` + aaa +aaa +``` +. +<pre><code>aaa +aaa +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example + ``` +aaa + aaa +aaa + ``` +. +<pre><code>aaa +aaa +aaa +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example + ``` + aaa + aaa + aaa + ``` +. +<pre><code>aaa + aaa +aaa +</code></pre> +```````````````````````````````` + + +Four spaces indentation produces an indented code block: + +```````````````````````````````` example + ``` + aaa + ``` +. +<pre><code>``` +aaa +``` +</code></pre> +```````````````````````````````` + + +Closing fences may be indented by 0-3 spaces, and their indentation +need not match that of the opening fence: + +```````````````````````````````` example +``` +aaa + ``` +. +<pre><code>aaa +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example + ``` +aaa + ``` +. +<pre><code>aaa +</code></pre> +```````````````````````````````` + + +This is not a closing fence, because it is indented 4 spaces: + +```````````````````````````````` example +``` +aaa + ``` +. +<pre><code>aaa + ``` +</code></pre> +```````````````````````````````` + + + +Code fences (opening and closing) cannot contain internal spaces: + +```````````````````````````````` example +``` ``` +aaa +. +<p><code> </code> +aaa</p> +```````````````````````````````` + + +```````````````````````````````` example +~~~~~~ +aaa +~~~ ~~ +. +<pre><code>aaa +~~~ ~~ +</code></pre> +```````````````````````````````` + + +Fenced code blocks can interrupt paragraphs, and can be followed +directly by paragraphs, without a blank line between: + +```````````````````````````````` example +foo +``` +bar +``` +baz +. +<p>foo</p> +<pre><code>bar +</code></pre> +<p>baz</p> +```````````````````````````````` + + +Other blocks can also occur before and after fenced code blocks +without an intervening blank line: + +```````````````````````````````` example +foo +--- +~~~ +bar +~~~ +# baz +. +<h2>foo</h2> +<pre><code>bar +</code></pre> +<h1>baz</h1> +```````````````````````````````` + + +An [info string] can be provided after the opening code fence. +Although this spec doesn't mandate any particular treatment of +the info string, the first word is typically used to specify +the language of the code block. In HTML output, the language is +normally indicated by adding a class to the `code` element consisting +of `language-` followed by the language name. + +```````````````````````````````` example +```ruby +def foo(x) + return 3 +end +``` +. +<pre><code class="language-ruby">def foo(x) + return 3 +end +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example +~~~~ ruby startline=3 $%@#$ +def foo(x) + return 3 +end +~~~~~~~ +. +<pre><code class="language-ruby">def foo(x) + return 3 +end +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example +````; +```` +. +<pre><code class="language-;"></code></pre> +```````````````````````````````` + + +[Info strings] for backtick code blocks cannot contain backticks: + +```````````````````````````````` example +``` aa ``` +foo +. +<p><code>aa</code> +foo</p> +```````````````````````````````` + + +[Info strings] for tilde code blocks can contain backticks and tildes: + +```````````````````````````````` example +~~~ aa ``` ~~~ +foo +~~~ +. +<pre><code class="language-aa">foo +</code></pre> +```````````````````````````````` + + +Closing code fences cannot have [info strings]: + +```````````````````````````````` example +``` +``` aaa +``` +. +<pre><code>``` aaa +</code></pre> +```````````````````````````````` + + + +## HTML blocks + +An [HTML block](@) is a group of lines that is treated +as raw HTML (and will not be escaped in HTML output). + +There are seven kinds of [HTML block], which can be defined by their +start and end conditions. The block begins with a line that meets a +[start condition](@) (after up to three spaces optional indentation). +It ends with the first subsequent line that meets a matching [end +condition](@), or the last line of the document, or the last line of +the [container block](#container-blocks) containing the current HTML +block, if no line is encountered that meets the [end condition]. If +the first line meets both the [start condition] and the [end +condition], the block will contain just that line. + +1. **Start condition:** line begins with the string `<script`, +`<pre`, or `<style` (case-insensitive), followed by whitespace, +the string `>`, or the end of the line.\ +**End condition:** line contains an end tag +`</script>`, `</pre>`, or `</style>` (case-insensitive; it +need not match the start tag). + +2. **Start condition:** line begins with the string `<!--`.\ +**End condition:** line contains the string `-->`. + +3. **Start condition:** line begins with the string `<?`.\ +**End condition:** line contains the string `?>`. + +4. **Start condition:** line begins with the string `<!` +followed by an uppercase ASCII letter.\ +**End condition:** line contains the character `>`. + +5. **Start condition:** line begins with the string +`<![CDATA[`.\ +**End condition:** line contains the string `]]>`. + +6. **Start condition:** line begins the string `<` or `</` +followed by one of the strings (case-insensitive) `address`, +`article`, `aside`, `base`, `basefont`, `blockquote`, `body`, +`caption`, `center`, `col`, `colgroup`, `dd`, `details`, `dialog`, +`dir`, `div`, `dl`, `dt`, `fieldset`, `figcaption`, `figure`, +`footer`, `form`, `frame`, `frameset`, +`h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `head`, `header`, `hr`, +`html`, `iframe`, `legend`, `li`, `link`, `main`, `menu`, `menuitem`, +`nav`, `noframes`, `ol`, `optgroup`, `option`, `p`, `param`, +`section`, `source`, `summary`, `table`, `tbody`, `td`, +`tfoot`, `th`, `thead`, `title`, `tr`, `track`, `ul`, followed +by [whitespace], the end of the line, the string `>`, or +the string `/>`.\ +**End condition:** line is followed by a [blank line]. + +7. **Start condition:** line begins with a complete [open tag] +(with any [tag name] other than `script`, +`style`, or `pre`) or a complete [closing tag], +followed only by [whitespace] or the end of the line.\ +**End condition:** line is followed by a [blank line]. + +HTML blocks continue until they are closed by their appropriate +[end condition], or the last line of the document or other [container +block](#container-blocks). This means any HTML **within an HTML +block** that might otherwise be recognised as a start condition will +be ignored by the parser and passed through as-is, without changing +the parser's state. + +For instance, `<pre>` within a HTML block started by `<table>` will not affect +the parser state; as the HTML block was started in by start condition 6, it +will end at any blank line. This can be surprising: + +```````````````````````````````` example +<table><tr><td> +<pre> +**Hello**, + +_world_. +</pre> +</td></tr></table> +. +<table><tr><td> +<pre> +**Hello**, +<p><em>world</em>. +</pre></p> +</td></tr></table> +```````````````````````````````` + +In this case, the HTML block is terminated by the newline — the `**Hello**` +text remains verbatim — and regular parsing resumes, with a paragraph, +emphasised `world` and inline and block HTML following. + +All types of [HTML blocks] except type 7 may interrupt +a paragraph. Blocks of type 7 may not interrupt a paragraph. +(This restriction is intended to prevent unwanted interpretation +of long tags inside a wrapped paragraph as starting HTML blocks.) + +Some simple examples follow. Here are some basic HTML blocks +of type 6: + +```````````````````````````````` example +<table> + <tr> + <td> + hi + </td> + </tr> +</table> + +okay. +. +<table> + <tr> + <td> + hi + </td> + </tr> +</table> +<p>okay.</p> +```````````````````````````````` + + +```````````````````````````````` example + <div> + *hello* + <foo><a> +. + <div> + *hello* + <foo><a> +```````````````````````````````` + + +A block can also start with a closing tag: + +```````````````````````````````` example +</div> +*foo* +. +</div> +*foo* +```````````````````````````````` + + +Here we have two HTML blocks with a Markdown paragraph between them: + +```````````````````````````````` example +<DIV CLASS="foo"> + +*Markdown* + +</DIV> +. +<DIV CLASS="foo"> +<p><em>Markdown</em></p> +</DIV> +```````````````````````````````` + + +The tag on the first line can be partial, as long +as it is split where there would be whitespace: + +```````````````````````````````` example +<div id="foo" + class="bar"> +</div> +. +<div id="foo" + class="bar"> +</div> +```````````````````````````````` + + +```````````````````````````````` example +<div id="foo" class="bar + baz"> +</div> +. +<div id="foo" class="bar + baz"> +</div> +```````````````````````````````` + + +An open tag need not be closed: +```````````````````````````````` example +<div> +*foo* + +*bar* +. +<div> +*foo* +<p><em>bar</em></p> +```````````````````````````````` + + + +A partial tag need not even be completed (garbage +in, garbage out): + +```````````````````````````````` example +<div id="foo" +*hi* +. +<div id="foo" +*hi* +```````````````````````````````` + + +```````````````````````````````` example +<div class +foo +. +<div class +foo +```````````````````````````````` + + +The initial tag doesn't even need to be a valid +tag, as long as it starts like one: + +```````````````````````````````` example +<div *???-&&&-<--- +*foo* +. +<div *???-&&&-<--- +*foo* +```````````````````````````````` + + +In type 6 blocks, the initial tag need not be on a line by +itself: + +```````````````````````````````` example +<div><a href="bar">*foo*</a></div> +. +<div><a href="bar">*foo*</a></div> +```````````````````````````````` + + +```````````````````````````````` example +<table><tr><td> +foo +</td></tr></table> +. +<table><tr><td> +foo +</td></tr></table> +```````````````````````````````` + + +Everything until the next blank line or end of document +gets included in the HTML block. So, in the following +example, what looks like a Markdown code block +is actually part of the HTML block, which continues until a blank +line or the end of the document is reached: + +```````````````````````````````` example +<div></div> +``` c +int x = 33; +``` +. +<div></div> +``` c +int x = 33; +``` +```````````````````````````````` + + +To start an [HTML block] with a tag that is *not* in the +list of block-level tags in (6), you must put the tag by +itself on the first line (and it must be complete): + +```````````````````````````````` example +<a href="foo"> +*bar* +</a> +. +<a href="foo"> +*bar* +</a> +```````````````````````````````` + + +In type 7 blocks, the [tag name] can be anything: + +```````````````````````````````` example +<Warning> +*bar* +</Warning> +. +<Warning> +*bar* +</Warning> +```````````````````````````````` + + +```````````````````````````````` example +<i class="foo"> +*bar* +</i> +. +<i class="foo"> +*bar* +</i> +```````````````````````````````` + + +```````````````````````````````` example +</ins> +*bar* +. +</ins> +*bar* +```````````````````````````````` + + +These rules are designed to allow us to work with tags that +can function as either block-level or inline-level tags. +The `<del>` tag is a nice example. We can surround content with +`<del>` tags in three different ways. In this case, we get a raw +HTML block, because the `<del>` tag is on a line by itself: + +```````````````````````````````` example +<del> +*foo* +</del> +. +<del> +*foo* +</del> +```````````````````````````````` + + +In this case, we get a raw HTML block that just includes +the `<del>` tag (because it ends with the following blank +line). So the contents get interpreted as CommonMark: + +```````````````````````````````` example +<del> + +*foo* + +</del> +. +<del> +<p><em>foo</em></p> +</del> +```````````````````````````````` + + +Finally, in this case, the `<del>` tags are interpreted +as [raw HTML] *inside* the CommonMark paragraph. (Because +the tag is not on a line by itself, we get inline HTML +rather than an [HTML block].) + +```````````````````````````````` example +<del>*foo*</del> +. +<p><del><em>foo</em></del></p> +```````````````````````````````` + + +HTML tags designed to contain literal content +(`script`, `style`, `pre`), comments, processing instructions, +and declarations are treated somewhat differently. +Instead of ending at the first blank line, these blocks +end at the first line containing a corresponding end tag. +As a result, these blocks can contain blank lines: + +A pre tag (type 1): + +```````````````````````````````` example +<pre language="haskell"><code> +import Text.HTML.TagSoup + +main :: IO () +main = print $ parseTags tags +</code></pre> +okay +. +<pre language="haskell"><code> +import Text.HTML.TagSoup + +main :: IO () +main = print $ parseTags tags +</code></pre> +<p>okay</p> +```````````````````````````````` + + +A script tag (type 1): + +```````````````````````````````` example +<script type="text/javascript"> +// JavaScript example + +document.getElementById("demo").innerHTML = "Hello JavaScript!"; +</script> +okay +. +<script type="text/javascript"> +// JavaScript example + +document.getElementById("demo").innerHTML = "Hello JavaScript!"; +</script> +<p>okay</p> +```````````````````````````````` + + +A style tag (type 1): + +```````````````````````````````` example +<style + type="text/css"> +h1 {color:red;} + +p {color:blue;} +</style> +okay +. +<style + type="text/css"> +h1 {color:red;} + +p {color:blue;} +</style> +<p>okay</p> +```````````````````````````````` + + +If there is no matching end tag, the block will end at the +end of the document (or the enclosing [block quote][block quotes] +or [list item][list items]): + +```````````````````````````````` example +<style + type="text/css"> + +foo +. +<style + type="text/css"> + +foo +```````````````````````````````` + + +```````````````````````````````` example +> <div> +> foo + +bar +. +<blockquote> +<div> +foo +</blockquote> +<p>bar</p> +```````````````````````````````` + + +```````````````````````````````` example +- <div> +- foo +. +<ul> +<li> +<div> +</li> +<li>foo</li> +</ul> +```````````````````````````````` + + +The end tag can occur on the same line as the start tag: + +```````````````````````````````` example +<style>p{color:red;}</style> +*foo* +. +<style>p{color:red;}</style> +<p><em>foo</em></p> +```````````````````````````````` + + +```````````````````````````````` example +<!-- foo -->*bar* +*baz* +. +<!-- foo -->*bar* +<p><em>baz</em></p> +```````````````````````````````` + + +Note that anything on the last line after the +end tag will be included in the [HTML block]: + +```````````````````````````````` example +<script> +foo +</script>1. *bar* +. +<script> +foo +</script>1. *bar* +```````````````````````````````` + + +A comment (type 2): + +```````````````````````````````` example +<!-- Foo + +bar + baz --> +okay +. +<!-- Foo + +bar + baz --> +<p>okay</p> +```````````````````````````````` + + + +A processing instruction (type 3): + +```````````````````````````````` example +<?php + + echo '>'; + +?> +okay +. +<?php + + echo '>'; + +?> +<p>okay</p> +```````````````````````````````` + + +A declaration (type 4): + +```````````````````````````````` example +<!DOCTYPE html> +. +<!DOCTYPE html> +```````````````````````````````` + + +CDATA (type 5): + +```````````````````````````````` example +<![CDATA[ +function matchwo(a,b) +{ + if (a < b && a < 0) then { + return 1; + + } else { + + return 0; + } +} +]]> +okay +. +<![CDATA[ +function matchwo(a,b) +{ + if (a < b && a < 0) then { + return 1; + + } else { + + return 0; + } +} +]]> +<p>okay</p> +```````````````````````````````` + + +The opening tag can be indented 1-3 spaces, but not 4: + +```````````````````````````````` example + <!-- foo --> + + <!-- foo --> +. + <!-- foo --> +<pre><code><!-- foo --> +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example + <div> + + <div> +. + <div> +<pre><code><div> +</code></pre> +```````````````````````````````` + + +An HTML block of types 1--6 can interrupt a paragraph, and need not be +preceded by a blank line. + +```````````````````````````````` example +Foo +<div> +bar +</div> +. +<p>Foo</p> +<div> +bar +</div> +```````````````````````````````` + + +However, a following blank line is needed, except at the end of +a document, and except for blocks of types 1--5, [above][HTML +block]: + +```````````````````````````````` example +<div> +bar +</div> +*foo* +. +<div> +bar +</div> +*foo* +```````````````````````````````` + + +HTML blocks of type 7 cannot interrupt a paragraph: + +```````````````````````````````` example +Foo +<a href="bar"> +baz +. +<p>Foo +<a href="bar"> +baz</p> +```````````````````````````````` + + +This rule differs from John Gruber's original Markdown syntax +specification, which says: + +> The only restrictions are that block-level HTML elements — +> e.g. `<div>`, `<table>`, `<pre>`, `<p>`, etc. — must be separated from +> surrounding content by blank lines, and the start and end tags of the +> block should not be indented with tabs or spaces. + +In some ways Gruber's rule is more restrictive than the one given +here: + +- It requires that an HTML block be preceded by a blank line. +- It does not allow the start tag to be indented. +- It requires a matching end tag, which it also does not allow to + be indented. + +Most Markdown implementations (including some of Gruber's own) do not +respect all of these restrictions. + +There is one respect, however, in which Gruber's rule is more liberal +than the one given here, since it allows blank lines to occur inside +an HTML block. There are two reasons for disallowing them here. +First, it removes the need to parse balanced tags, which is +expensive and can require backtracking from the end of the document +if no matching end tag is found. Second, it provides a very simple +and flexible way of including Markdown content inside HTML tags: +simply separate the Markdown from the HTML using blank lines: + +Compare: + +```````````````````````````````` example +<div> + +*Emphasized* text. + +</div> +. +<div> +<p><em>Emphasized</em> text.</p> +</div> +```````````````````````````````` + + +```````````````````````````````` example +<div> +*Emphasized* text. +</div> +. +<div> +*Emphasized* text. +</div> +```````````````````````````````` + + +Some Markdown implementations have adopted a convention of +interpreting content inside tags as text if the open tag has +the attribute `markdown=1`. The rule given above seems a simpler and +more elegant way of achieving the same expressive power, which is also +much simpler to parse. + +The main potential drawback is that one can no longer paste HTML +blocks into Markdown documents with 100% reliability. However, +*in most cases* this will work fine, because the blank lines in +HTML are usually followed by HTML block tags. For example: + +```````````````````````````````` example +<table> + +<tr> + +<td> +Hi +</td> + +</tr> + +</table> +. +<table> +<tr> +<td> +Hi +</td> +</tr> +</table> +```````````````````````````````` + + +There are problems, however, if the inner tags are indented +*and* separated by spaces, as then they will be interpreted as +an indented code block: + +```````````````````````````````` example +<table> + + <tr> + + <td> + Hi + </td> + + </tr> + +</table> +. +<table> + <tr> +<pre><code><td> + Hi +</td> +</code></pre> + </tr> +</table> +```````````````````````````````` + + +Fortunately, blank lines are usually not necessary and can be +deleted. The exception is inside `<pre>` tags, but as described +[above][HTML blocks], raw HTML blocks starting with `<pre>` +*can* contain blank lines. + +## Link reference definitions + +A [link reference definition](@) +consists of a [link label], indented up to three spaces, followed +by a colon (`:`), optional [whitespace] (including up to one +[line ending]), a [link destination], +optional [whitespace] (including up to one +[line ending]), and an optional [link +title], which if it is present must be separated +from the [link destination] by [whitespace]. +No further [non-whitespace characters] may occur on the line. + +A [link reference definition] +does not correspond to a structural element of a document. Instead, it +defines a label which can be used in [reference links] +and reference-style [images] elsewhere in the document. [Link +reference definitions] can come either before or after the links that use +them. + +```````````````````````````````` example +[foo]: /url "title" + +[foo] +. +<p><a href="/url" title="title">foo</a></p> +```````````````````````````````` + + +```````````````````````````````` example + [foo]: + /url + 'the title' + +[foo] +. +<p><a href="/url" title="the title">foo</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[Foo*bar\]]:my_(url) 'title (with parens)' + +[Foo*bar\]] +. +<p><a href="my_(url)" title="title (with parens)">Foo*bar]</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[Foo bar]: +<my url> +'title' + +[Foo bar] +. +<p><a href="my%20url" title="title">Foo bar</a></p> +```````````````````````````````` + + +The title may extend over multiple lines: + +```````````````````````````````` example +[foo]: /url ' +title +line1 +line2 +' + +[foo] +. +<p><a href="/url" title=" +title +line1 +line2 +">foo</a></p> +```````````````````````````````` + + +However, it may not contain a [blank line]: + +```````````````````````````````` example +[foo]: /url 'title + +with blank line' + +[foo] +. +<p>[foo]: /url 'title</p> +<p>with blank line'</p> +<p>[foo]</p> +```````````````````````````````` + + +The title may be omitted: + +```````````````````````````````` example +[foo]: +/url + +[foo] +. +<p><a href="/url">foo</a></p> +```````````````````````````````` + + +The link destination may not be omitted: + +```````````````````````````````` example +[foo]: + +[foo] +. +<p>[foo]:</p> +<p>[foo]</p> +```````````````````````````````` + + However, an empty link destination may be specified using + angle brackets: + +```````````````````````````````` example +[foo]: <> + +[foo] +. +<p><a href="">foo</a></p> +```````````````````````````````` + +The title must be separated from the link destination by +whitespace: + +```````````````````````````````` example +[foo]: <bar>(baz) + +[foo] +. +<p>[foo]: <bar>(baz)</p> +<p>[foo]</p> +```````````````````````````````` + + +Both title and destination can contain backslash escapes +and literal backslashes: + +```````````````````````````````` example +[foo]: /url\bar\*baz "foo\"bar\baz" + +[foo] +. +<p><a href="/url%5Cbar*baz" title="foo"bar\baz">foo</a></p> +```````````````````````````````` + + +A link can come before its corresponding definition: + +```````````````````````````````` example +[foo] + +[foo]: url +. +<p><a href="url">foo</a></p> +```````````````````````````````` + + +If there are several matching definitions, the first one takes +precedence: + +```````````````````````````````` example +[foo] + +[foo]: first +[foo]: second +. +<p><a href="first">foo</a></p> +```````````````````````````````` + + +As noted in the section on [Links], matching of labels is +case-insensitive (see [matches]). + +```````````````````````````````` example +[FOO]: /url + +[Foo] +. +<p><a href="/url">Foo</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[ΑΓΩ]: /φου + +[αγω] +. +<p><a href="/%CF%86%CE%BF%CF%85">αγω</a></p> +```````````````````````````````` + + +Here is a link reference definition with no corresponding link. +It contributes nothing to the document. + +```````````````````````````````` example +[foo]: /url +. +```````````````````````````````` + + +Here is another one: + +```````````````````````````````` example +[ +foo +]: /url +bar +. +<p>bar</p> +```````````````````````````````` + + +This is not a link reference definition, because there are +[non-whitespace characters] after the title: + +```````````````````````````````` example +[foo]: /url "title" ok +. +<p>[foo]: /url "title" ok</p> +```````````````````````````````` + + +This is a link reference definition, but it has no title: + +```````````````````````````````` example +[foo]: /url +"title" ok +. +<p>"title" ok</p> +```````````````````````````````` + + +This is not a link reference definition, because it is indented +four spaces: + +```````````````````````````````` example + [foo]: /url "title" + +[foo] +. +<pre><code>[foo]: /url "title" +</code></pre> +<p>[foo]</p> +```````````````````````````````` + + +This is not a link reference definition, because it occurs inside +a code block: + +```````````````````````````````` example +``` +[foo]: /url +``` + +[foo] +. +<pre><code>[foo]: /url +</code></pre> +<p>[foo]</p> +```````````````````````````````` + + +A [link reference definition] cannot interrupt a paragraph. + +```````````````````````````````` example +Foo +[bar]: /baz + +[bar] +. +<p>Foo +[bar]: /baz</p> +<p>[bar]</p> +```````````````````````````````` + + +However, it can directly follow other block elements, such as headings +and thematic breaks, and it need not be followed by a blank line. + +```````````````````````````````` example +# [Foo] +[foo]: /url +> bar +. +<h1><a href="/url">Foo</a></h1> +<blockquote> +<p>bar</p> +</blockquote> +```````````````````````````````` + +```````````````````````````````` example +[foo]: /url +bar +=== +[foo] +. +<h1>bar</h1> +<p><a href="/url">foo</a></p> +```````````````````````````````` + +```````````````````````````````` example +[foo]: /url +=== +[foo] +. +<p>=== +<a href="/url">foo</a></p> +```````````````````````````````` + + +Several [link reference definitions] +can occur one after another, without intervening blank lines. + +```````````````````````````````` example +[foo]: /foo-url "foo" +[bar]: /bar-url + "bar" +[baz]: /baz-url + +[foo], +[bar], +[baz] +. +<p><a href="/foo-url" title="foo">foo</a>, +<a href="/bar-url" title="bar">bar</a>, +<a href="/baz-url">baz</a></p> +```````````````````````````````` + + +[Link reference definitions] can occur +inside block containers, like lists and block quotations. They +affect the entire document, not just the container in which they +are defined: + +```````````````````````````````` example +[foo] + +> [foo]: /url +. +<p><a href="/url">foo</a></p> +<blockquote> +</blockquote> +```````````````````````````````` + + +Whether something is a [link reference definition] is +independent of whether the link reference it defines is +used in the document. Thus, for example, the following +document contains just a link reference definition, and +no visible content: + +```````````````````````````````` example +[foo]: /url +. +```````````````````````````````` + + +## Paragraphs + +A sequence of non-blank lines that cannot be interpreted as other +kinds of blocks forms a [paragraph](@). +The contents of the paragraph are the result of parsing the +paragraph's raw content as inlines. The paragraph's raw content +is formed by concatenating the lines and removing initial and final +[whitespace]. + +A simple example with two paragraphs: + +```````````````````````````````` example +aaa + +bbb +. +<p>aaa</p> +<p>bbb</p> +```````````````````````````````` + + +Paragraphs can contain multiple lines, but no blank lines: + +```````````````````````````````` example +aaa +bbb + +ccc +ddd +. +<p>aaa +bbb</p> +<p>ccc +ddd</p> +```````````````````````````````` + + +Multiple blank lines between paragraph have no effect: + +```````````````````````````````` example +aaa + + +bbb +. +<p>aaa</p> +<p>bbb</p> +```````````````````````````````` + + +Leading spaces are skipped: + +```````````````````````````````` example + aaa + bbb +. +<p>aaa +bbb</p> +```````````````````````````````` + + +Lines after the first may be indented any amount, since indented +code blocks cannot interrupt paragraphs. + +```````````````````````````````` example +aaa + bbb + ccc +. +<p>aaa +bbb +ccc</p> +```````````````````````````````` + + +However, the first line may be indented at most three spaces, +or an indented code block will be triggered: + +```````````````````````````````` example + aaa +bbb +. +<p>aaa +bbb</p> +```````````````````````````````` + + +```````````````````````````````` example + aaa +bbb +. +<pre><code>aaa +</code></pre> +<p>bbb</p> +```````````````````````````````` + + +Final spaces are stripped before inline parsing, so a paragraph +that ends with two or more spaces will not end with a [hard line +break]: + +```````````````````````````````` example +aaa +bbb +. +<p>aaa<br /> +bbb</p> +```````````````````````````````` + + +## Blank lines + +[Blank lines] between block-level elements are ignored, +except for the role they play in determining whether a [list] +is [tight] or [loose]. + +Blank lines at the beginning and end of the document are also ignored. + +```````````````````````````````` example + + +aaa + + +# aaa + + +. +<p>aaa</p> +<h1>aaa</h1> +```````````````````````````````` + + + +# Container blocks + +A [container block](#container-blocks) is a block that has other +blocks as its contents. There are two basic kinds of container blocks: +[block quotes] and [list items]. +[Lists] are meta-containers for [list items]. + +We define the syntax for container blocks recursively. The general +form of the definition is: + +> If X is a sequence of blocks, then the result of +> transforming X in such-and-such a way is a container of type Y +> with these blocks as its content. + +So, we explain what counts as a block quote or list item by explaining +how these can be *generated* from their contents. This should suffice +to define the syntax, although it does not give a recipe for *parsing* +these constructions. (A recipe is provided below in the section entitled +[A parsing strategy](#appendix-a-parsing-strategy).) + +## Block quotes + +A [block quote marker](@) +consists of 0-3 spaces of initial indent, plus (a) the character `>` together +with a following space, or (b) a single character `>` not followed by a space. + +The following rules define [block quotes]: + +1. **Basic case.** If a string of lines *Ls* constitute a sequence + of blocks *Bs*, then the result of prepending a [block quote + marker] to the beginning of each line in *Ls* + is a [block quote](#block-quotes) containing *Bs*. + +2. **Laziness.** If a string of lines *Ls* constitute a [block + quote](#block-quotes) with contents *Bs*, then the result of deleting + the initial [block quote marker] from one or + more lines in which the next [non-whitespace character] after the [block + quote marker] is [paragraph continuation + text] is a block quote with *Bs* as its content. + [Paragraph continuation text](@) is text + that will be parsed as part of the content of a paragraph, but does + not occur at the beginning of the paragraph. + +3. **Consecutiveness.** A document cannot contain two [block + quotes] in a row unless there is a [blank line] between them. + +Nothing else counts as a [block quote](#block-quotes). + +Here is a simple example: + +```````````````````````````````` example +> # Foo +> bar +> baz +. +<blockquote> +<h1>Foo</h1> +<p>bar +baz</p> +</blockquote> +```````````````````````````````` + + +The spaces after the `>` characters can be omitted: + +```````````````````````````````` example +># Foo +>bar +> baz +. +<blockquote> +<h1>Foo</h1> +<p>bar +baz</p> +</blockquote> +```````````````````````````````` + + +The `>` characters can be indented 1-3 spaces: + +```````````````````````````````` example + > # Foo + > bar + > baz +. +<blockquote> +<h1>Foo</h1> +<p>bar +baz</p> +</blockquote> +```````````````````````````````` + + +Four spaces gives us a code block: + +```````````````````````````````` example + > # Foo + > bar + > baz +. +<pre><code>> # Foo +> bar +> baz +</code></pre> +```````````````````````````````` + + +The Laziness clause allows us to omit the `>` before +[paragraph continuation text]: + +```````````````````````````````` example +> # Foo +> bar +baz +. +<blockquote> +<h1>Foo</h1> +<p>bar +baz</p> +</blockquote> +```````````````````````````````` + + +A block quote can contain some lazy and some non-lazy +continuation lines: + +```````````````````````````````` example +> bar +baz +> foo +. +<blockquote> +<p>bar +baz +foo</p> +</blockquote> +```````````````````````````````` + + +Laziness only applies to lines that would have been continuations of +paragraphs had they been prepended with [block quote markers]. +For example, the `> ` cannot be omitted in the second line of + +``` markdown +> foo +> --- +``` + +without changing the meaning: + +```````````````````````````````` example +> foo +--- +. +<blockquote> +<p>foo</p> +</blockquote> +<hr /> +```````````````````````````````` + + +Similarly, if we omit the `> ` in the second line of + +``` markdown +> - foo +> - bar +``` + +then the block quote ends after the first line: + +```````````````````````````````` example +> - foo +- bar +. +<blockquote> +<ul> +<li>foo</li> +</ul> +</blockquote> +<ul> +<li>bar</li> +</ul> +```````````````````````````````` + + +For the same reason, we can't omit the `> ` in front of +subsequent lines of an indented or fenced code block: + +```````````````````````````````` example +> foo + bar +. +<blockquote> +<pre><code>foo +</code></pre> +</blockquote> +<pre><code>bar +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example +> ``` +foo +``` +. +<blockquote> +<pre><code></code></pre> +</blockquote> +<p>foo</p> +<pre><code></code></pre> +```````````````````````````````` + + +Note that in the following case, we have a [lazy +continuation line]: + +```````````````````````````````` example +> foo + - bar +. +<blockquote> +<p>foo +- bar</p> +</blockquote> +```````````````````````````````` + + +To see why, note that in + +```markdown +> foo +> - bar +``` + +the `- bar` is indented too far to start a list, and can't +be an indented code block because indented code blocks cannot +interrupt paragraphs, so it is [paragraph continuation text]. + +A block quote can be empty: + +```````````````````````````````` example +> +. +<blockquote> +</blockquote> +```````````````````````````````` + + +```````````````````````````````` example +> +> +> +. +<blockquote> +</blockquote> +```````````````````````````````` + + +A block quote can have initial or final blank lines: + +```````````````````````````````` example +> +> foo +> +. +<blockquote> +<p>foo</p> +</blockquote> +```````````````````````````````` + + +A blank line always separates block quotes: + +```````````````````````````````` example +> foo + +> bar +. +<blockquote> +<p>foo</p> +</blockquote> +<blockquote> +<p>bar</p> +</blockquote> +```````````````````````````````` + + +(Most current Markdown implementations, including John Gruber's +original `Markdown.pl`, will parse this example as a single block quote +with two paragraphs. But it seems better to allow the author to decide +whether two block quotes or one are wanted.) + +Consecutiveness means that if we put these block quotes together, +we get a single block quote: + +```````````````````````````````` example +> foo +> bar +. +<blockquote> +<p>foo +bar</p> +</blockquote> +```````````````````````````````` + + +To get a block quote with two paragraphs, use: + +```````````````````````````````` example +> foo +> +> bar +. +<blockquote> +<p>foo</p> +<p>bar</p> +</blockquote> +```````````````````````````````` + + +Block quotes can interrupt paragraphs: + +```````````````````````````````` example +foo +> bar +. +<p>foo</p> +<blockquote> +<p>bar</p> +</blockquote> +```````````````````````````````` + + +In general, blank lines are not needed before or after block +quotes: + +```````````````````````````````` example +> aaa +*** +> bbb +. +<blockquote> +<p>aaa</p> +</blockquote> +<hr /> +<blockquote> +<p>bbb</p> +</blockquote> +```````````````````````````````` + + +However, because of laziness, a blank line is needed between +a block quote and a following paragraph: + +```````````````````````````````` example +> bar +baz +. +<blockquote> +<p>bar +baz</p> +</blockquote> +```````````````````````````````` + + +```````````````````````````````` example +> bar + +baz +. +<blockquote> +<p>bar</p> +</blockquote> +<p>baz</p> +```````````````````````````````` + + +```````````````````````````````` example +> bar +> +baz +. +<blockquote> +<p>bar</p> +</blockquote> +<p>baz</p> +```````````````````````````````` + + +It is a consequence of the Laziness rule that any number +of initial `>`s may be omitted on a continuation line of a +nested block quote: + +```````````````````````````````` example +> > > foo +bar +. +<blockquote> +<blockquote> +<blockquote> +<p>foo +bar</p> +</blockquote> +</blockquote> +</blockquote> +```````````````````````````````` + + +```````````````````````````````` example +>>> foo +> bar +>>baz +. +<blockquote> +<blockquote> +<blockquote> +<p>foo +bar +baz</p> +</blockquote> +</blockquote> +</blockquote> +```````````````````````````````` + + +When including an indented code block in a block quote, +remember that the [block quote marker] includes +both the `>` and a following space. So *five spaces* are needed after +the `>`: + +```````````````````````````````` example +> code + +> not code +. +<blockquote> +<pre><code>code +</code></pre> +</blockquote> +<blockquote> +<p>not code</p> +</blockquote> +```````````````````````````````` + + + +## List items + +A [list marker](@) is a +[bullet list marker] or an [ordered list marker]. + +A [bullet list marker](@) +is a `-`, `+`, or `*` character. + +An [ordered list marker](@) +is a sequence of 1--9 arabic digits (`0-9`), followed by either a +`.` character or a `)` character. (The reason for the length +limit is that with 10 digits we start seeing integer overflows +in some browsers.) + +The following rules define [list items]: + +1. **Basic case.** If a sequence of lines *Ls* constitute a sequence of + blocks *Bs* starting with a [non-whitespace character], and *M* is a + list marker of width *W* followed by 1 ≤ *N* ≤ 4 spaces, then the result + of prepending *M* and the following spaces to the first line of + *Ls*, and indenting subsequent lines of *Ls* by *W + N* spaces, is a + list item with *Bs* as its contents. The type of the list item + (bullet or ordered) is determined by the type of its list marker. + If the list item is ordered, then it is also assigned a start + number, based on the ordered list marker. + + Exceptions: + + 1. When the first list item in a [list] interrupts + a paragraph---that is, when it starts on a line that would + otherwise count as [paragraph continuation text]---then (a) + the lines *Ls* must not begin with a blank line, and (b) if + the list item is ordered, the start number must be 1. + 2. If any line is a [thematic break][thematic breaks] then + that line is not a list item. + +For example, let *Ls* be the lines + +```````````````````````````````` example +A paragraph +with two lines. + + indented code + +> A block quote. +. +<p>A paragraph +with two lines.</p> +<pre><code>indented code +</code></pre> +<blockquote> +<p>A block quote.</p> +</blockquote> +```````````````````````````````` + + +And let *M* be the marker `1.`, and *N* = 2. Then rule #1 says +that the following is an ordered list item with start number 1, +and the same contents as *Ls*: + +```````````````````````````````` example +1. A paragraph + with two lines. + + indented code + + > A block quote. +. +<ol> +<li> +<p>A paragraph +with two lines.</p> +<pre><code>indented code +</code></pre> +<blockquote> +<p>A block quote.</p> +</blockquote> +</li> +</ol> +```````````````````````````````` + + +The most important thing to notice is that the position of +the text after the list marker determines how much indentation +is needed in subsequent blocks in the list item. If the list +marker takes up two spaces, and there are three spaces between +the list marker and the next [non-whitespace character], then blocks +must be indented five spaces in order to fall under the list +item. + +Here are some examples showing how far content must be indented to be +put under the list item: + +```````````````````````````````` example +- one + + two +. +<ul> +<li>one</li> +</ul> +<p>two</p> +```````````````````````````````` + + +```````````````````````````````` example +- one + + two +. +<ul> +<li> +<p>one</p> +<p>two</p> +</li> +</ul> +```````````````````````````````` + + +```````````````````````````````` example + - one + + two +. +<ul> +<li>one</li> +</ul> +<pre><code> two +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example + - one + + two +. +<ul> +<li> +<p>one</p> +<p>two</p> +</li> +</ul> +```````````````````````````````` + + +It is tempting to think of this in terms of columns: the continuation +blocks must be indented at least to the column of the first +[non-whitespace character] after the list marker. However, that is not quite right. +The spaces after the list marker determine how much relative indentation +is needed. Which column this indentation reaches will depend on +how the list item is embedded in other constructions, as shown by +this example: + +```````````````````````````````` example + > > 1. one +>> +>> two +. +<blockquote> +<blockquote> +<ol> +<li> +<p>one</p> +<p>two</p> +</li> +</ol> +</blockquote> +</blockquote> +```````````````````````````````` + + +Here `two` occurs in the same column as the list marker `1.`, +but is actually contained in the list item, because there is +sufficient indentation after the last containing blockquote marker. + +The converse is also possible. In the following example, the word `two` +occurs far to the right of the initial text of the list item, `one`, but +it is not considered part of the list item, because it is not indented +far enough past the blockquote marker: + +```````````````````````````````` example +>>- one +>> + > > two +. +<blockquote> +<blockquote> +<ul> +<li>one</li> +</ul> +<p>two</p> +</blockquote> +</blockquote> +```````````````````````````````` + + +Note that at least one space is needed between the list marker and +any following content, so these are not list items: + +```````````````````````````````` example +-one + +2.two +. +<p>-one</p> +<p>2.two</p> +```````````````````````````````` + + +A list item may contain blocks that are separated by more than +one blank line. + +```````````````````````````````` example +- foo + + + bar +. +<ul> +<li> +<p>foo</p> +<p>bar</p> +</li> +</ul> +```````````````````````````````` + + +A list item may contain any kind of block: + +```````````````````````````````` example +1. foo + + ``` + bar + ``` + + baz + + > bam +. +<ol> +<li> +<p>foo</p> +<pre><code>bar +</code></pre> +<p>baz</p> +<blockquote> +<p>bam</p> +</blockquote> +</li> +</ol> +```````````````````````````````` + + +A list item that contains an indented code block will preserve +empty lines within the code block verbatim. + +```````````````````````````````` example +- Foo + + bar + + + baz +. +<ul> +<li> +<p>Foo</p> +<pre><code>bar + + +baz +</code></pre> +</li> +</ul> +```````````````````````````````` + +Note that ordered list start numbers must be nine digits or less: + +```````````````````````````````` example +123456789. ok +. +<ol start="123456789"> +<li>ok</li> +</ol> +```````````````````````````````` + + +```````````````````````````````` example +1234567890. not ok +. +<p>1234567890. not ok</p> +```````````````````````````````` + + +A start number may begin with 0s: + +```````````````````````````````` example +0. ok +. +<ol start="0"> +<li>ok</li> +</ol> +```````````````````````````````` + + +```````````````````````````````` example +003. ok +. +<ol start="3"> +<li>ok</li> +</ol> +```````````````````````````````` + + +A start number may not be negative: + +```````````````````````````````` example +-1. not ok +. +<p>-1. not ok</p> +```````````````````````````````` + + + +2. **Item starting with indented code.** If a sequence of lines *Ls* + constitute a sequence of blocks *Bs* starting with an indented code + block, and *M* is a list marker of width *W* followed by + one space, then the result of prepending *M* and the following + space to the first line of *Ls*, and indenting subsequent lines of + *Ls* by *W + 1* spaces, is a list item with *Bs* as its contents. + If a line is empty, then it need not be indented. The type of the + list item (bullet or ordered) is determined by the type of its list + marker. If the list item is ordered, then it is also assigned a + start number, based on the ordered list marker. + +An indented code block will have to be indented four spaces beyond +the edge of the region where text will be included in the list item. +In the following case that is 6 spaces: + +```````````````````````````````` example +- foo + + bar +. +<ul> +<li> +<p>foo</p> +<pre><code>bar +</code></pre> +</li> +</ul> +```````````````````````````````` + + +And in this case it is 11 spaces: + +```````````````````````````````` example + 10. foo + + bar +. +<ol start="10"> +<li> +<p>foo</p> +<pre><code>bar +</code></pre> +</li> +</ol> +```````````````````````````````` + + +If the *first* block in the list item is an indented code block, +then by rule #2, the contents must be indented *one* space after the +list marker: + +```````````````````````````````` example + indented code + +paragraph + + more code +. +<pre><code>indented code +</code></pre> +<p>paragraph</p> +<pre><code>more code +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example +1. indented code + + paragraph + + more code +. +<ol> +<li> +<pre><code>indented code +</code></pre> +<p>paragraph</p> +<pre><code>more code +</code></pre> +</li> +</ol> +```````````````````````````````` + + +Note that an additional space indent is interpreted as space +inside the code block: + +```````````````````````````````` example +1. indented code + + paragraph + + more code +. +<ol> +<li> +<pre><code> indented code +</code></pre> +<p>paragraph</p> +<pre><code>more code +</code></pre> +</li> +</ol> +```````````````````````````````` + + +Note that rules #1 and #2 only apply to two cases: (a) cases +in which the lines to be included in a list item begin with a +[non-whitespace character], and (b) cases in which +they begin with an indented code +block. In a case like the following, where the first block begins with +a three-space indent, the rules do not allow us to form a list item by +indenting the whole thing and prepending a list marker: + +```````````````````````````````` example + foo + +bar +. +<p>foo</p> +<p>bar</p> +```````````````````````````````` + + +```````````````````````````````` example +- foo + + bar +. +<ul> +<li>foo</li> +</ul> +<p>bar</p> +```````````````````````````````` + + +This is not a significant restriction, because when a block begins +with 1-3 spaces indent, the indentation can always be removed without +a change in interpretation, allowing rule #1 to be applied. So, in +the above case: + +```````````````````````````````` example +- foo + + bar +. +<ul> +<li> +<p>foo</p> +<p>bar</p> +</li> +</ul> +```````````````````````````````` + + +3. **Item starting with a blank line.** If a sequence of lines *Ls* + starting with a single [blank line] constitute a (possibly empty) + sequence of blocks *Bs*, not separated from each other by more than + one blank line, and *M* is a list marker of width *W*, + then the result of prepending *M* to the first line of *Ls*, and + indenting subsequent lines of *Ls* by *W + 1* spaces, is a list + item with *Bs* as its contents. + If a line is empty, then it need not be indented. The type of the + list item (bullet or ordered) is determined by the type of its list + marker. If the list item is ordered, then it is also assigned a + start number, based on the ordered list marker. + +Here are some list items that start with a blank line but are not empty: + +```````````````````````````````` example +- + foo +- + ``` + bar + ``` +- + baz +. +<ul> +<li>foo</li> +<li> +<pre><code>bar +</code></pre> +</li> +<li> +<pre><code>baz +</code></pre> +</li> +</ul> +```````````````````````````````` + +When the list item starts with a blank line, the number of spaces +following the list marker doesn't change the required indentation: + +```````````````````````````````` example +- + foo +. +<ul> +<li>foo</li> +</ul> +```````````````````````````````` + + +A list item can begin with at most one blank line. +In the following example, `foo` is not part of the list +item: + +```````````````````````````````` example +- + + foo +. +<ul> +<li></li> +</ul> +<p>foo</p> +```````````````````````````````` + + +Here is an empty bullet list item: + +```````````````````````````````` example +- foo +- +- bar +. +<ul> +<li>foo</li> +<li></li> +<li>bar</li> +</ul> +```````````````````````````````` + + +It does not matter whether there are spaces following the [list marker]: + +```````````````````````````````` example +- foo +- +- bar +. +<ul> +<li>foo</li> +<li></li> +<li>bar</li> +</ul> +```````````````````````````````` + + +Here is an empty ordered list item: + +```````````````````````````````` example +1. foo +2. +3. bar +. +<ol> +<li>foo</li> +<li></li> +<li>bar</li> +</ol> +```````````````````````````````` + + +A list may start or end with an empty list item: + +```````````````````````````````` example +* +. +<ul> +<li></li> +</ul> +```````````````````````````````` + +However, an empty list item cannot interrupt a paragraph: + +```````````````````````````````` example +foo +* + +foo +1. +. +<p>foo +*</p> +<p>foo +1.</p> +```````````````````````````````` + + +4. **Indentation.** If a sequence of lines *Ls* constitutes a list item + according to rule #1, #2, or #3, then the result of indenting each line + of *Ls* by 1-3 spaces (the same for each line) also constitutes a + list item with the same contents and attributes. If a line is + empty, then it need not be indented. + +Indented one space: + +```````````````````````````````` example + 1. A paragraph + with two lines. + + indented code + + > A block quote. +. +<ol> +<li> +<p>A paragraph +with two lines.</p> +<pre><code>indented code +</code></pre> +<blockquote> +<p>A block quote.</p> +</blockquote> +</li> +</ol> +```````````````````````````````` + + +Indented two spaces: + +```````````````````````````````` example + 1. A paragraph + with two lines. + + indented code + + > A block quote. +. +<ol> +<li> +<p>A paragraph +with two lines.</p> +<pre><code>indented code +</code></pre> +<blockquote> +<p>A block quote.</p> +</blockquote> +</li> +</ol> +```````````````````````````````` + + +Indented three spaces: + +```````````````````````````````` example + 1. A paragraph + with two lines. + + indented code + + > A block quote. +. +<ol> +<li> +<p>A paragraph +with two lines.</p> +<pre><code>indented code +</code></pre> +<blockquote> +<p>A block quote.</p> +</blockquote> +</li> +</ol> +```````````````````````````````` + + +Four spaces indent gives a code block: + +```````````````````````````````` example + 1. A paragraph + with two lines. + + indented code + + > A block quote. +. +<pre><code>1. A paragraph + with two lines. + + indented code + + > A block quote. +</code></pre> +```````````````````````````````` + + + +5. **Laziness.** If a string of lines *Ls* constitute a [list + item](#list-items) with contents *Bs*, then the result of deleting + some or all of the indentation from one or more lines in which the + next [non-whitespace character] after the indentation is + [paragraph continuation text] is a + list item with the same contents and attributes. The unindented + lines are called + [lazy continuation line](@)s. + +Here is an example with [lazy continuation lines]: + +```````````````````````````````` example + 1. A paragraph +with two lines. + + indented code + + > A block quote. +. +<ol> +<li> +<p>A paragraph +with two lines.</p> +<pre><code>indented code +</code></pre> +<blockquote> +<p>A block quote.</p> +</blockquote> +</li> +</ol> +```````````````````````````````` + + +Indentation can be partially deleted: + +```````````````````````````````` example + 1. A paragraph + with two lines. +. +<ol> +<li>A paragraph +with two lines.</li> +</ol> +```````````````````````````````` + + +These examples show how laziness can work in nested structures: + +```````````````````````````````` example +> 1. > Blockquote +continued here. +. +<blockquote> +<ol> +<li> +<blockquote> +<p>Blockquote +continued here.</p> +</blockquote> +</li> +</ol> +</blockquote> +```````````````````````````````` + + +```````````````````````````````` example +> 1. > Blockquote +> continued here. +. +<blockquote> +<ol> +<li> +<blockquote> +<p>Blockquote +continued here.</p> +</blockquote> +</li> +</ol> +</blockquote> +```````````````````````````````` + + + +6. **That's all.** Nothing that is not counted as a list item by rules + #1--5 counts as a [list item](#list-items). + +The rules for sublists follow from the general rules +[above][List items]. A sublist must be indented the same number +of spaces a paragraph would need to be in order to be included +in the list item. + +So, in this case we need two spaces indent: + +```````````````````````````````` example +- foo + - bar + - baz + - boo +. +<ul> +<li>foo +<ul> +<li>bar +<ul> +<li>baz +<ul> +<li>boo</li> +</ul> +</li> +</ul> +</li> +</ul> +</li> +</ul> +```````````````````````````````` + + +One is not enough: + +```````````````````````````````` example +- foo + - bar + - baz + - boo +. +<ul> +<li>foo</li> +<li>bar</li> +<li>baz</li> +<li>boo</li> +</ul> +```````````````````````````````` + + +Here we need four, because the list marker is wider: + +```````````````````````````````` example +10) foo + - bar +. +<ol start="10"> +<li>foo +<ul> +<li>bar</li> +</ul> +</li> +</ol> +```````````````````````````````` + + +Three is not enough: + +```````````````````````````````` example +10) foo + - bar +. +<ol start="10"> +<li>foo</li> +</ol> +<ul> +<li>bar</li> +</ul> +```````````````````````````````` + + +A list may be the first block in a list item: + +```````````````````````````````` example +- - foo +. +<ul> +<li> +<ul> +<li>foo</li> +</ul> +</li> +</ul> +```````````````````````````````` + + +```````````````````````````````` example +1. - 2. foo +. +<ol> +<li> +<ul> +<li> +<ol start="2"> +<li>foo</li> +</ol> +</li> +</ul> +</li> +</ol> +```````````````````````````````` + + +A list item can contain a heading: + +```````````````````````````````` example +- # Foo +- Bar + --- + baz +. +<ul> +<li> +<h1>Foo</h1> +</li> +<li> +<h2>Bar</h2> +baz</li> +</ul> +```````````````````````````````` + + +### Motivation + +John Gruber's Markdown spec says the following about list items: + +1. "List markers typically start at the left margin, but may be indented + by up to three spaces. List markers must be followed by one or more + spaces or a tab." + +2. "To make lists look nice, you can wrap items with hanging indents.... + But if you don't want to, you don't have to." + +3. "List items may consist of multiple paragraphs. Each subsequent + paragraph in a list item must be indented by either 4 spaces or one + tab." + +4. "It looks nice if you indent every line of the subsequent paragraphs, + but here again, Markdown will allow you to be lazy." + +5. "To put a blockquote within a list item, the blockquote's `>` + delimiters need to be indented." + +6. "To put a code block within a list item, the code block needs to be + indented twice — 8 spaces or two tabs." + +These rules specify that a paragraph under a list item must be indented +four spaces (presumably, from the left margin, rather than the start of +the list marker, but this is not said), and that code under a list item +must be indented eight spaces instead of the usual four. They also say +that a block quote must be indented, but not by how much; however, the +example given has four spaces indentation. Although nothing is said +about other kinds of block-level content, it is certainly reasonable to +infer that *all* block elements under a list item, including other +lists, must be indented four spaces. This principle has been called the +*four-space rule*. + +The four-space rule is clear and principled, and if the reference +implementation `Markdown.pl` had followed it, it probably would have +become the standard. However, `Markdown.pl` allowed paragraphs and +sublists to start with only two spaces indentation, at least on the +outer level. Worse, its behavior was inconsistent: a sublist of an +outer-level list needed two spaces indentation, but a sublist of this +sublist needed three spaces. It is not surprising, then, that different +implementations of Markdown have developed very different rules for +determining what comes under a list item. (Pandoc and python-Markdown, +for example, stuck with Gruber's syntax description and the four-space +rule, while discount, redcarpet, marked, PHP Markdown, and others +followed `Markdown.pl`'s behavior more closely.) + +Unfortunately, given the divergences between implementations, there +is no way to give a spec for list items that will be guaranteed not +to break any existing documents. However, the spec given here should +correctly handle lists formatted with either the four-space rule or +the more forgiving `Markdown.pl` behavior, provided they are laid out +in a way that is natural for a human to read. + +The strategy here is to let the width and indentation of the list marker +determine the indentation necessary for blocks to fall under the list +item, rather than having a fixed and arbitrary number. The writer can +think of the body of the list item as a unit which gets indented to the +right enough to fit the list marker (and any indentation on the list +marker). (The laziness rule, #5, then allows continuation lines to be +unindented if needed.) + +This rule is superior, we claim, to any rule requiring a fixed level of +indentation from the margin. The four-space rule is clear but +unnatural. It is quite unintuitive that + +``` markdown +- foo + + bar + + - baz +``` + +should be parsed as two lists with an intervening paragraph, + +``` html +<ul> +<li>foo</li> +</ul> +<p>bar</p> +<ul> +<li>baz</li> +</ul> +``` + +as the four-space rule demands, rather than a single list, + +``` html +<ul> +<li> +<p>foo</p> +<p>bar</p> +<ul> +<li>baz</li> +</ul> +</li> +</ul> +``` + +The choice of four spaces is arbitrary. It can be learned, but it is +not likely to be guessed, and it trips up beginners regularly. + +Would it help to adopt a two-space rule? The problem is that such +a rule, together with the rule allowing 1--3 spaces indentation of the +initial list marker, allows text that is indented *less than* the +original list marker to be included in the list item. For example, +`Markdown.pl` parses + +``` markdown + - one + + two +``` + +as a single list item, with `two` a continuation paragraph: + +``` html +<ul> +<li> +<p>one</p> +<p>two</p> +</li> +</ul> +``` + +and similarly + +``` markdown +> - one +> +> two +``` + +as + +``` html +<blockquote> +<ul> +<li> +<p>one</p> +<p>two</p> +</li> +</ul> +</blockquote> +``` + +This is extremely unintuitive. + +Rather than requiring a fixed indent from the margin, we could require +a fixed indent (say, two spaces, or even one space) from the list marker (which +may itself be indented). This proposal would remove the last anomaly +discussed. Unlike the spec presented above, it would count the following +as a list item with a subparagraph, even though the paragraph `bar` +is not indented as far as the first paragraph `foo`: + +``` markdown + 10. foo + + bar +``` + +Arguably this text does read like a list item with `bar` as a subparagraph, +which may count in favor of the proposal. However, on this proposal indented +code would have to be indented six spaces after the list marker. And this +would break a lot of existing Markdown, which has the pattern: + +``` markdown +1. foo + + indented code +``` + +where the code is indented eight spaces. The spec above, by contrast, will +parse this text as expected, since the code block's indentation is measured +from the beginning of `foo`. + +The one case that needs special treatment is a list item that *starts* +with indented code. How much indentation is required in that case, since +we don't have a "first paragraph" to measure from? Rule #2 simply stipulates +that in such cases, we require one space indentation from the list marker +(and then the normal four spaces for the indented code). This will match the +four-space rule in cases where the list marker plus its initial indentation +takes four spaces (a common case), but diverge in other cases. + +## Lists + +A [list](@) is a sequence of one or more +list items [of the same type]. The list items +may be separated by any number of blank lines. + +Two list items are [of the same type](@) +if they begin with a [list marker] of the same type. +Two list markers are of the +same type if (a) they are bullet list markers using the same character +(`-`, `+`, or `*`) or (b) they are ordered list numbers with the same +delimiter (either `.` or `)`). + +A list is an [ordered list](@) +if its constituent list items begin with +[ordered list markers], and a +[bullet list](@) if its constituent list +items begin with [bullet list markers]. + +The [start number](@) +of an [ordered list] is determined by the list number of +its initial list item. The numbers of subsequent list items are +disregarded. + +A list is [loose](@) if any of its constituent +list items are separated by blank lines, or if any of its constituent +list items directly contain two block-level elements with a blank line +between them. Otherwise a list is [tight](@). +(The difference in HTML output is that paragraphs in a loose list are +wrapped in `<p>` tags, while paragraphs in a tight list are not.) + +Changing the bullet or ordered list delimiter starts a new list: + +```````````````````````````````` example +- foo +- bar ++ baz +. +<ul> +<li>foo</li> +<li>bar</li> +</ul> +<ul> +<li>baz</li> +</ul> +```````````````````````````````` + + +```````````````````````````````` example +1. foo +2. bar +3) baz +. +<ol> +<li>foo</li> +<li>bar</li> +</ol> +<ol start="3"> +<li>baz</li> +</ol> +```````````````````````````````` + + +In CommonMark, a list can interrupt a paragraph. That is, +no blank line is needed to separate a paragraph from a following +list: + +```````````````````````````````` example +Foo +- bar +- baz +. +<p>Foo</p> +<ul> +<li>bar</li> +<li>baz</li> +</ul> +```````````````````````````````` + +`Markdown.pl` does not allow this, through fear of triggering a list +via a numeral in a hard-wrapped line: + +``` markdown +The number of windows in my house is +14. The number of doors is 6. +``` + +Oddly, though, `Markdown.pl` *does* allow a blockquote to +interrupt a paragraph, even though the same considerations might +apply. + +In CommonMark, we do allow lists to interrupt paragraphs, for +two reasons. First, it is natural and not uncommon for people +to start lists without blank lines: + +``` markdown +I need to buy +- new shoes +- a coat +- a plane ticket +``` + +Second, we are attracted to a + +> [principle of uniformity](@): +> if a chunk of text has a certain +> meaning, it will continue to have the same meaning when put into a +> container block (such as a list item or blockquote). + +(Indeed, the spec for [list items] and [block quotes] presupposes +this principle.) This principle implies that if + +``` markdown + * I need to buy + - new shoes + - a coat + - a plane ticket +``` + +is a list item containing a paragraph followed by a nested sublist, +as all Markdown implementations agree it is (though the paragraph +may be rendered without `<p>` tags, since the list is "tight"), +then + +``` markdown +I need to buy +- new shoes +- a coat +- a plane ticket +``` + +by itself should be a paragraph followed by a nested sublist. + +Since it is well established Markdown practice to allow lists to +interrupt paragraphs inside list items, the [principle of +uniformity] requires us to allow this outside list items as +well. ([reStructuredText](http://docutils.sourceforge.net/rst.html) +takes a different approach, requiring blank lines before lists +even inside other list items.) + +In order to solve of unwanted lists in paragraphs with +hard-wrapped numerals, we allow only lists starting with `1` to +interrupt paragraphs. Thus, + +```````````````````````````````` example +The number of windows in my house is +14. The number of doors is 6. +. +<p>The number of windows in my house is +14. The number of doors is 6.</p> +```````````````````````````````` + +We may still get an unintended result in cases like + +```````````````````````````````` example +The number of windows in my house is +1. The number of doors is 6. +. +<p>The number of windows in my house is</p> +<ol> +<li>The number of doors is 6.</li> +</ol> +```````````````````````````````` + +but this rule should prevent most spurious list captures. + +There can be any number of blank lines between items: + +```````````````````````````````` example +- foo + +- bar + + +- baz +. +<ul> +<li> +<p>foo</p> +</li> +<li> +<p>bar</p> +</li> +<li> +<p>baz</p> +</li> +</ul> +```````````````````````````````` + +```````````````````````````````` example +- foo + - bar + - baz + + + bim +. +<ul> +<li>foo +<ul> +<li>bar +<ul> +<li> +<p>baz</p> +<p>bim</p> +</li> +</ul> +</li> +</ul> +</li> +</ul> +```````````````````````````````` + + +To separate consecutive lists of the same type, or to separate a +list from an indented code block that would otherwise be parsed +as a subparagraph of the final list item, you can insert a blank HTML +comment: + +```````````````````````````````` example +- foo +- bar + +<!-- --> + +- baz +- bim +. +<ul> +<li>foo</li> +<li>bar</li> +</ul> +<!-- --> +<ul> +<li>baz</li> +<li>bim</li> +</ul> +```````````````````````````````` + + +```````````````````````````````` example +- foo + + notcode + +- foo + +<!-- --> + + code +. +<ul> +<li> +<p>foo</p> +<p>notcode</p> +</li> +<li> +<p>foo</p> +</li> +</ul> +<!-- --> +<pre><code>code +</code></pre> +```````````````````````````````` + + +List items need not be indented to the same level. The following +list items will be treated as items at the same list level, +since none is indented enough to belong to the previous list +item: + +```````````````````````````````` example +- a + - b + - c + - d + - e + - f +- g +. +<ul> +<li>a</li> +<li>b</li> +<li>c</li> +<li>d</li> +<li>e</li> +<li>f</li> +<li>g</li> +</ul> +```````````````````````````````` + + +```````````````````````````````` example +1. a + + 2. b + + 3. c +. +<ol> +<li> +<p>a</p> +</li> +<li> +<p>b</p> +</li> +<li> +<p>c</p> +</li> +</ol> +```````````````````````````````` + +Note, however, that list items may not be indented more than +three spaces. Here `- e` is treated as a paragraph continuation +line, because it is indented more than three spaces: + +```````````````````````````````` example +- a + - b + - c + - d + - e +. +<ul> +<li>a</li> +<li>b</li> +<li>c</li> +<li>d +- e</li> +</ul> +```````````````````````````````` + +And here, `3. c` is treated as in indented code block, +because it is indented four spaces and preceded by a +blank line. + +```````````````````````````````` example +1. a + + 2. b + + 3. c +. +<ol> +<li> +<p>a</p> +</li> +<li> +<p>b</p> +</li> +</ol> +<pre><code>3. c +</code></pre> +```````````````````````````````` + + +This is a loose list, because there is a blank line between +two of the list items: + +```````````````````````````````` example +- a +- b + +- c +. +<ul> +<li> +<p>a</p> +</li> +<li> +<p>b</p> +</li> +<li> +<p>c</p> +</li> +</ul> +```````````````````````````````` + + +So is this, with a empty second item: + +```````````````````````````````` example +* a +* + +* c +. +<ul> +<li> +<p>a</p> +</li> +<li></li> +<li> +<p>c</p> +</li> +</ul> +```````````````````````````````` + + +These are loose lists, even though there is no space between the items, +because one of the items directly contains two block-level elements +with a blank line between them: + +```````````````````````````````` example +- a +- b + + c +- d +. +<ul> +<li> +<p>a</p> +</li> +<li> +<p>b</p> +<p>c</p> +</li> +<li> +<p>d</p> +</li> +</ul> +```````````````````````````````` + + +```````````````````````````````` example +- a +- b + + [ref]: /url +- d +. +<ul> +<li> +<p>a</p> +</li> +<li> +<p>b</p> +</li> +<li> +<p>d</p> +</li> +</ul> +```````````````````````````````` + + +This is a tight list, because the blank lines are in a code block: + +```````````````````````````````` example +- a +- ``` + b + + + ``` +- c +. +<ul> +<li>a</li> +<li> +<pre><code>b + + +</code></pre> +</li> +<li>c</li> +</ul> +```````````````````````````````` + + +This is a tight list, because the blank line is between two +paragraphs of a sublist. So the sublist is loose while +the outer list is tight: + +```````````````````````````````` example +- a + - b + + c +- d +. +<ul> +<li>a +<ul> +<li> +<p>b</p> +<p>c</p> +</li> +</ul> +</li> +<li>d</li> +</ul> +```````````````````````````````` + + +This is a tight list, because the blank line is inside the +block quote: + +```````````````````````````````` example +* a + > b + > +* c +. +<ul> +<li>a +<blockquote> +<p>b</p> +</blockquote> +</li> +<li>c</li> +</ul> +```````````````````````````````` + + +This list is tight, because the consecutive block elements +are not separated by blank lines: + +```````````````````````````````` example +- a + > b + ``` + c + ``` +- d +. +<ul> +<li>a +<blockquote> +<p>b</p> +</blockquote> +<pre><code>c +</code></pre> +</li> +<li>d</li> +</ul> +```````````````````````````````` + + +A single-paragraph list is tight: + +```````````````````````````````` example +- a +. +<ul> +<li>a</li> +</ul> +```````````````````````````````` + + +```````````````````````````````` example +- a + - b +. +<ul> +<li>a +<ul> +<li>b</li> +</ul> +</li> +</ul> +```````````````````````````````` + + +This list is loose, because of the blank line between the +two block elements in the list item: + +```````````````````````````````` example +1. ``` + foo + ``` + + bar +. +<ol> +<li> +<pre><code>foo +</code></pre> +<p>bar</p> +</li> +</ol> +```````````````````````````````` + + +Here the outer list is loose, the inner list tight: + +```````````````````````````````` example +* foo + * bar + + baz +. +<ul> +<li> +<p>foo</p> +<ul> +<li>bar</li> +</ul> +<p>baz</p> +</li> +</ul> +```````````````````````````````` + + +```````````````````````````````` example +- a + - b + - c + +- d + - e + - f +. +<ul> +<li> +<p>a</p> +<ul> +<li>b</li> +<li>c</li> +</ul> +</li> +<li> +<p>d</p> +<ul> +<li>e</li> +<li>f</li> +</ul> +</li> +</ul> +```````````````````````````````` + + +# Inlines + +Inlines are parsed sequentially from the beginning of the character +stream to the end (left to right, in left-to-right languages). +Thus, for example, in + +```````````````````````````````` example +`hi`lo` +. +<p><code>hi</code>lo`</p> +```````````````````````````````` + +`hi` is parsed as code, leaving the backtick at the end as a literal +backtick. + + +## Backslash escapes + +Any ASCII punctuation character may be backslash-escaped: + +```````````````````````````````` example +\!\"\#\$\%\&\'\(\)\*\+\,\-\.\/\:\;\<\=\>\?\@\[\\\]\^\_\`\{\|\}\~ +. +<p>!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~</p> +```````````````````````````````` + + +Backslashes before other characters are treated as literal +backslashes: + +```````````````````````````````` example +\→\A\a\ \3\φ\« +. +<p>\→\A\a\ \3\φ\«</p> +```````````````````````````````` + + +Escaped characters are treated as regular characters and do +not have their usual Markdown meanings: + +```````````````````````````````` example +\*not emphasized* +\<br/> not a tag +\[not a link](/foo) +\`not code` +1\. not a list +\* not a list +\# not a heading +\[foo]: /url "not a reference" +\ö not a character entity +. +<p>*not emphasized* +<br/> not a tag +[not a link](/foo) +`not code` +1. not a list +* not a list +# not a heading +[foo]: /url "not a reference" +&ouml; not a character entity</p> +```````````````````````````````` + + +If a backslash is itself escaped, the following character is not: + +```````````````````````````````` example +\\*emphasis* +. +<p>\<em>emphasis</em></p> +```````````````````````````````` + + +A backslash at the end of the line is a [hard line break]: + +```````````````````````````````` example +foo\ +bar +. +<p>foo<br /> +bar</p> +```````````````````````````````` + + +Backslash escapes do not work in code blocks, code spans, autolinks, or +raw HTML: + +```````````````````````````````` example +`` \[\` `` +. +<p><code>\[\`</code></p> +```````````````````````````````` + + +```````````````````````````````` example + \[\] +. +<pre><code>\[\] +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example +~~~ +\[\] +~~~ +. +<pre><code>\[\] +</code></pre> +```````````````````````````````` + + +```````````````````````````````` example +<http://example.com?find=\*> +. +<p><a href="http://example.com?find=%5C*">http://example.com?find=\*</a></p> +```````````````````````````````` + + +```````````````````````````````` example +<a href="/bar\/)"> +. +<a href="/bar\/)"> +```````````````````````````````` + + +But they work in all other contexts, including URLs and link titles, +link references, and [info strings] in [fenced code blocks]: + +```````````````````````````````` example +[foo](/bar\* "ti\*tle") +. +<p><a href="/bar*" title="ti*tle">foo</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[foo] + +[foo]: /bar\* "ti\*tle" +. +<p><a href="/bar*" title="ti*tle">foo</a></p> +```````````````````````````````` + + +```````````````````````````````` example +``` foo\+bar +foo +``` +. +<pre><code class="language-foo+bar">foo +</code></pre> +```````````````````````````````` + + + +## Entity and numeric character references + +Valid HTML entity references and numeric character references +can be used in place of the corresponding Unicode character, +with the following exceptions: + +- Entity and character references are not recognized in code + blocks and code spans. + +- Entity and character references cannot stand in place of + special characters that define structural elements in + CommonMark. For example, although `*` can be used + in place of a literal `*` character, `*` cannot replace + `*` in emphasis delimiters, bullet list markers, or thematic + breaks. + +Conforming CommonMark parsers need not store information about +whether a particular character was represented in the source +using a Unicode character or an entity reference. + +[Entity references](@) consist of `&` + any of the valid +HTML5 entity names + `;`. The +document <https://html.spec.whatwg.org/multipage/entities.json> +is used as an authoritative source for the valid entity +references and their corresponding code points. + +```````````````````````````````` example +  & © Æ Ď +¾ ℋ ⅆ +∲ ≧̸ +. +<p>  & © Æ Ď +¾ ℋ ⅆ +∲ ≧̸</p> +```````````````````````````````` + + +[Decimal numeric character +references](@) +consist of `&#` + a string of 1--7 arabic digits + `;`. A +numeric character reference is parsed as the corresponding +Unicode character. Invalid Unicode code points will be replaced by +the REPLACEMENT CHARACTER (`U+FFFD`). For security reasons, +the code point `U+0000` will also be replaced by `U+FFFD`. + +```````````````````````````````` example +# Ӓ Ϡ � +. +<p># Ӓ Ϡ �</p> +```````````````````````````````` + + +[Hexadecimal numeric character +references](@) consist of `&#` + +either `X` or `x` + a string of 1-6 hexadecimal digits + `;`. +They too are parsed as the corresponding Unicode character (this +time specified with a hexadecimal numeral instead of decimal). + +```````````````````````````````` example +" ആ ಫ +. +<p>" ആ ಫ</p> +```````````````````````````````` + + +Here are some nonentities: + +```````````````````````````````` example +  &x; &#; &#x; +� +&#abcdef0; +&ThisIsNotDefined; &hi?; +. +<p>&nbsp &x; &#; &#x; +&#987654321; +&#abcdef0; +&ThisIsNotDefined; &hi?;</p> +```````````````````````````````` + + +Although HTML5 does accept some entity references +without a trailing semicolon (such as `©`), these are not +recognized here, because it makes the grammar too ambiguous: + +```````````````````````````````` example +© +. +<p>&copy</p> +```````````````````````````````` + + +Strings that are not on the list of HTML5 named entities are not +recognized as entity references either: + +```````````````````````````````` example +&MadeUpEntity; +. +<p>&MadeUpEntity;</p> +```````````````````````````````` + + +Entity and numeric character references are recognized in any +context besides code spans or code blocks, including +URLs, [link titles], and [fenced code block][] [info strings]: + +```````````````````````````````` example +<a href="öö.html"> +. +<a href="öö.html"> +```````````````````````````````` + + +```````````````````````````````` example +[foo](/föö "föö") +. +<p><a href="/f%C3%B6%C3%B6" title="föö">foo</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[foo] + +[foo]: /föö "föö" +. +<p><a href="/f%C3%B6%C3%B6" title="föö">foo</a></p> +```````````````````````````````` + + +```````````````````````````````` example +``` föö +foo +``` +. +<pre><code class="language-föö">foo +</code></pre> +```````````````````````````````` + + +Entity and numeric character references are treated as literal +text in code spans and code blocks: + +```````````````````````````````` example +`föö` +. +<p><code>f&ouml;&ouml;</code></p> +```````````````````````````````` + + +```````````````````````````````` example + föfö +. +<pre><code>f&ouml;f&ouml; +</code></pre> +```````````````````````````````` + + +Entity and numeric character references cannot be used +in place of symbols indicating structure in CommonMark +documents. + +```````````````````````````````` example +*foo* +*foo* +. +<p>*foo* +<em>foo</em></p> +```````````````````````````````` + +```````````````````````````````` example +* foo + +* foo +. +<p>* foo</p> +<ul> +<li>foo</li> +</ul> +```````````````````````````````` + +```````````````````````````````` example +foo bar +. +<p>foo + +bar</p> +```````````````````````````````` + +```````````````````````````````` example + foo +. +<p>→foo</p> +```````````````````````````````` + + +```````````````````````````````` example +[a](url "tit") +. +<p>[a](url "tit")</p> +```````````````````````````````` + + +## Code spans + +A [backtick string](@) +is a string of one or more backtick characters (`` ` ``) that is neither +preceded nor followed by a backtick. + +A [code span](@) begins with a backtick string and ends with +a backtick string of equal length. The contents of the code span are +the characters between the two backtick strings, normalized in the +following ways: + +- First, [line endings] are converted to [spaces]. +- If the resulting string both begins *and* ends with a [space] + character, but does not consist entirely of [space] + characters, a single [space] character is removed from the + front and back. This allows you to include code that begins + or ends with backtick characters, which must be separated by + whitespace from the opening or closing backtick strings. + +This is a simple code span: + +```````````````````````````````` example +`foo` +. +<p><code>foo</code></p> +```````````````````````````````` + + +Here two backticks are used, because the code contains a backtick. +This example also illustrates stripping of a single leading and +trailing space: + +```````````````````````````````` example +`` foo ` bar `` +. +<p><code>foo ` bar</code></p> +```````````````````````````````` + + +This example shows the motivation for stripping leading and trailing +spaces: + +```````````````````````````````` example +` `` ` +. +<p><code>``</code></p> +```````````````````````````````` + +Note that only *one* space is stripped: + +```````````````````````````````` example +` `` ` +. +<p><code> `` </code></p> +```````````````````````````````` + +The stripping only happens if the space is on both +sides of the string: + +```````````````````````````````` example +` a` +. +<p><code> a</code></p> +```````````````````````````````` + +Only [spaces], and not [unicode whitespace] in general, are +stripped in this way: + +```````````````````````````````` example +` b ` +. +<p><code> b </code></p> +```````````````````````````````` + +No stripping occurs if the code span contains only spaces: + +```````````````````````````````` example +` ` +` ` +. +<p><code> </code> +<code> </code></p> +```````````````````````````````` + + +[Line endings] are treated like spaces: + +```````````````````````````````` example +`` +foo +bar +baz +`` +. +<p><code>foo bar baz</code></p> +```````````````````````````````` + +```````````````````````````````` example +`` +foo +`` +. +<p><code>foo </code></p> +```````````````````````````````` + + +Interior spaces are not collapsed: + +```````````````````````````````` example +`foo bar +baz` +. +<p><code>foo bar baz</code></p> +```````````````````````````````` + +Note that browsers will typically collapse consecutive spaces +when rendering `<code>` elements, so it is recommended that +the following CSS be used: + + code{white-space: pre-wrap;} + + +Note that backslash escapes do not work in code spans. All backslashes +are treated literally: + +```````````````````````````````` example +`foo\`bar` +. +<p><code>foo\</code>bar`</p> +```````````````````````````````` + + +Backslash escapes are never needed, because one can always choose a +string of *n* backtick characters as delimiters, where the code does +not contain any strings of exactly *n* backtick characters. + +```````````````````````````````` example +``foo`bar`` +. +<p><code>foo`bar</code></p> +```````````````````````````````` + +```````````````````````````````` example +` foo `` bar ` +. +<p><code>foo `` bar</code></p> +```````````````````````````````` + + +Code span backticks have higher precedence than any other inline +constructs except HTML tags and autolinks. Thus, for example, this is +not parsed as emphasized text, since the second `*` is part of a code +span: + +```````````````````````````````` example +*foo`*` +. +<p>*foo<code>*</code></p> +```````````````````````````````` + + +And this is not parsed as a link: + +```````````````````````````````` example +[not a `link](/foo`) +. +<p>[not a <code>link](/foo</code>)</p> +```````````````````````````````` + + +Code spans, HTML tags, and autolinks have the same precedence. +Thus, this is code: + +```````````````````````````````` example +`<a href="`">` +. +<p><code><a href="</code>">`</p> +```````````````````````````````` + + +But this is an HTML tag: + +```````````````````````````````` example +<a href="`">` +. +<p><a href="`">`</p> +```````````````````````````````` + + +And this is code: + +```````````````````````````````` example +`<http://foo.bar.`baz>` +. +<p><code><http://foo.bar.</code>baz>`</p> +```````````````````````````````` + + +But this is an autolink: + +```````````````````````````````` example +<http://foo.bar.`baz>` +. +<p><a href="http://foo.bar.%60baz">http://foo.bar.`baz</a>`</p> +```````````````````````````````` + + +When a backtick string is not closed by a matching backtick string, +we just have literal backticks: + +```````````````````````````````` example +```foo`` +. +<p>```foo``</p> +```````````````````````````````` + + +```````````````````````````````` example +`foo +. +<p>`foo</p> +```````````````````````````````` + +The following case also illustrates the need for opening and +closing backtick strings to be equal in length: + +```````````````````````````````` example +`foo``bar`` +. +<p>`foo<code>bar</code></p> +```````````````````````````````` + + +## Emphasis and strong emphasis + +John Gruber's original [Markdown syntax +description](http://daringfireball.net/projects/markdown/syntax#em) says: + +> Markdown treats asterisks (`*`) and underscores (`_`) as indicators of +> emphasis. Text wrapped with one `*` or `_` will be wrapped with an HTML +> `<em>` tag; double `*`'s or `_`'s will be wrapped with an HTML `<strong>` +> tag. + +This is enough for most users, but these rules leave much undecided, +especially when it comes to nested emphasis. The original +`Markdown.pl` test suite makes it clear that triple `***` and +`___` delimiters can be used for strong emphasis, and most +implementations have also allowed the following patterns: + +``` markdown +***strong emph*** +***strong** in emph* +***emph* in strong** +**in strong *emph*** +*in emph **strong*** +``` + +The following patterns are less widely supported, but the intent +is clear and they are useful (especially in contexts like bibliography +entries): + +``` markdown +*emph *with emph* in it* +**strong **with strong** in it** +``` + +Many implementations have also restricted intraword emphasis to +the `*` forms, to avoid unwanted emphasis in words containing +internal underscores. (It is best practice to put these in code +spans, but users often do not.) + +``` markdown +internal emphasis: foo*bar*baz +no emphasis: foo_bar_baz +``` + +The rules given below capture all of these patterns, while allowing +for efficient parsing strategies that do not backtrack. + +First, some definitions. A [delimiter run](@) is either +a sequence of one or more `*` characters that is not preceded or +followed by a non-backslash-escaped `*` character, or a sequence +of one or more `_` characters that is not preceded or followed by +a non-backslash-escaped `_` character. + +A [left-flanking delimiter run](@) is +a [delimiter run] that is (1) not followed by [Unicode whitespace], +and either (2a) not followed by a [punctuation character], or +(2b) followed by a [punctuation character] and +preceded by [Unicode whitespace] or a [punctuation character]. +For purposes of this definition, the beginning and the end of +the line count as Unicode whitespace. + +A [right-flanking delimiter run](@) is +a [delimiter run] that is (1) not preceded by [Unicode whitespace], +and either (2a) not preceded by a [punctuation character], or +(2b) preceded by a [punctuation character] and +followed by [Unicode whitespace] or a [punctuation character]. +For purposes of this definition, the beginning and the end of +the line count as Unicode whitespace. + +Here are some examples of delimiter runs. + + - left-flanking but not right-flanking: + + ``` + ***abc + _abc + **"abc" + _"abc" + ``` + + - right-flanking but not left-flanking: + + ``` + abc*** + abc_ + "abc"** + "abc"_ + ``` + + - Both left and right-flanking: + + ``` + abc***def + "abc"_"def" + ``` + + - Neither left nor right-flanking: + + ``` + abc *** def + a _ b + ``` + +(The idea of distinguishing left-flanking and right-flanking +delimiter runs based on the character before and the character +after comes from Roopesh Chander's +[vfmd](http://www.vfmd.org/vfmd-spec/specification/#procedure-for-identifying-emphasis-tags). +vfmd uses the terminology "emphasis indicator string" instead of "delimiter +run," and its rules for distinguishing left- and right-flanking runs +are a bit more complex than the ones given here.) + +The following rules define emphasis and strong emphasis: + +1. A single `*` character [can open emphasis](@) + iff (if and only if) it is part of a [left-flanking delimiter run]. + +2. A single `_` character [can open emphasis] iff + it is part of a [left-flanking delimiter run] + and either (a) not part of a [right-flanking delimiter run] + or (b) part of a [right-flanking delimiter run] + preceded by punctuation. + +3. A single `*` character [can close emphasis](@) + iff it is part of a [right-flanking delimiter run]. + +4. A single `_` character [can close emphasis] iff + it is part of a [right-flanking delimiter run] + and either (a) not part of a [left-flanking delimiter run] + or (b) part of a [left-flanking delimiter run] + followed by punctuation. + +5. A double `**` [can open strong emphasis](@) + iff it is part of a [left-flanking delimiter run]. + +6. A double `__` [can open strong emphasis] iff + it is part of a [left-flanking delimiter run] + and either (a) not part of a [right-flanking delimiter run] + or (b) part of a [right-flanking delimiter run] + preceded by punctuation. + +7. A double `**` [can close strong emphasis](@) + iff it is part of a [right-flanking delimiter run]. + +8. A double `__` [can close strong emphasis] iff + it is part of a [right-flanking delimiter run] + and either (a) not part of a [left-flanking delimiter run] + or (b) part of a [left-flanking delimiter run] + followed by punctuation. + +9. Emphasis begins with a delimiter that [can open emphasis] and ends + with a delimiter that [can close emphasis], and that uses the same + character (`_` or `*`) as the opening delimiter. The + opening and closing delimiters must belong to separate + [delimiter runs]. If one of the delimiters can both + open and close emphasis, then the sum of the lengths of the + delimiter runs containing the opening and closing delimiters + must not be a multiple of 3 unless both lengths are + multiples of 3. + +10. Strong emphasis begins with a delimiter that + [can open strong emphasis] and ends with a delimiter that + [can close strong emphasis], and that uses the same character + (`_` or `*`) as the opening delimiter. The + opening and closing delimiters must belong to separate + [delimiter runs]. If one of the delimiters can both open + and close strong emphasis, then the sum of the lengths of + the delimiter runs containing the opening and closing + delimiters must not be a multiple of 3 unless both lengths + are multiples of 3. + +11. A literal `*` character cannot occur at the beginning or end of + `*`-delimited emphasis or `**`-delimited strong emphasis, unless it + is backslash-escaped. + +12. A literal `_` character cannot occur at the beginning or end of + `_`-delimited emphasis or `__`-delimited strong emphasis, unless it + is backslash-escaped. + +Where rules 1--12 above are compatible with multiple parsings, +the following principles resolve ambiguity: + +13. The number of nestings should be minimized. Thus, for example, + an interpretation `<strong>...</strong>` is always preferred to + `<em><em>...</em></em>`. + +14. An interpretation `<em><strong>...</strong></em>` is always + preferred to `<strong><em>...</em></strong>`. + +15. When two potential emphasis or strong emphasis spans overlap, + so that the second begins before the first ends and ends after + the first ends, the first takes precedence. Thus, for example, + `*foo _bar* baz_` is parsed as `<em>foo _bar</em> baz_` rather + than `*foo <em>bar* baz</em>`. + +16. When there are two potential emphasis or strong emphasis spans + with the same closing delimiter, the shorter one (the one that + opens later) takes precedence. Thus, for example, + `**foo **bar baz**` is parsed as `**foo <strong>bar baz</strong>` + rather than `<strong>foo **bar baz</strong>`. + +17. Inline code spans, links, images, and HTML tags group more tightly + than emphasis. So, when there is a choice between an interpretation + that contains one of these elements and one that does not, the + former always wins. Thus, for example, `*[foo*](bar)` is + parsed as `*<a href="bar">foo*</a>` rather than as + `<em>[foo</em>](bar)`. + +These rules can be illustrated through a series of examples. + +Rule 1: + +```````````````````````````````` example +*foo bar* +. +<p><em>foo bar</em></p> +```````````````````````````````` + + +This is not emphasis, because the opening `*` is followed by +whitespace, and hence not part of a [left-flanking delimiter run]: + +```````````````````````````````` example +a * foo bar* +. +<p>a * foo bar*</p> +```````````````````````````````` + + +This is not emphasis, because the opening `*` is preceded +by an alphanumeric and followed by punctuation, and hence +not part of a [left-flanking delimiter run]: + +```````````````````````````````` example +a*"foo"* +. +<p>a*"foo"*</p> +```````````````````````````````` + + +Unicode nonbreaking spaces count as whitespace, too: + +```````````````````````````````` example +* a * +. +<p>* a *</p> +```````````````````````````````` + + +Intraword emphasis with `*` is permitted: + +```````````````````````````````` example +foo*bar* +. +<p>foo<em>bar</em></p> +```````````````````````````````` + + +```````````````````````````````` example +5*6*78 +. +<p>5<em>6</em>78</p> +```````````````````````````````` + + +Rule 2: + +```````````````````````````````` example +_foo bar_ +. +<p><em>foo bar</em></p> +```````````````````````````````` + + +This is not emphasis, because the opening `_` is followed by +whitespace: + +```````````````````````````````` example +_ foo bar_ +. +<p>_ foo bar_</p> +```````````````````````````````` + + +This is not emphasis, because the opening `_` is preceded +by an alphanumeric and followed by punctuation: + +```````````````````````````````` example +a_"foo"_ +. +<p>a_"foo"_</p> +```````````````````````````````` + + +Emphasis with `_` is not allowed inside words: + +```````````````````````````````` example +foo_bar_ +. +<p>foo_bar_</p> +```````````````````````````````` + + +```````````````````````````````` example +5_6_78 +. +<p>5_6_78</p> +```````````````````````````````` + + +```````````````````````````````` example +пристаням_стремятся_ +. +<p>пристаням_стремятся_</p> +```````````````````````````````` + + +Here `_` does not generate emphasis, because the first delimiter run +is right-flanking and the second left-flanking: + +```````````````````````````````` example +aa_"bb"_cc +. +<p>aa_"bb"_cc</p> +```````````````````````````````` + + +This is emphasis, even though the opening delimiter is +both left- and right-flanking, because it is preceded by +punctuation: + +```````````````````````````````` example +foo-_(bar)_ +. +<p>foo-<em>(bar)</em></p> +```````````````````````````````` + + +Rule 3: + +This is not emphasis, because the closing delimiter does +not match the opening delimiter: + +```````````````````````````````` example +_foo* +. +<p>_foo*</p> +```````````````````````````````` + + +This is not emphasis, because the closing `*` is preceded by +whitespace: + +```````````````````````````````` example +*foo bar * +. +<p>*foo bar *</p> +```````````````````````````````` + + +A newline also counts as whitespace: + +```````````````````````````````` example +*foo bar +* +. +<p>*foo bar +*</p> +```````````````````````````````` + + +This is not emphasis, because the second `*` is +preceded by punctuation and followed by an alphanumeric +(hence it is not part of a [right-flanking delimiter run]: + +```````````````````````````````` example +*(*foo) +. +<p>*(*foo)</p> +```````````````````````````````` + + +The point of this restriction is more easily appreciated +with this example: + +```````````````````````````````` example +*(*foo*)* +. +<p><em>(<em>foo</em>)</em></p> +```````````````````````````````` + + +Intraword emphasis with `*` is allowed: + +```````````````````````````````` example +*foo*bar +. +<p><em>foo</em>bar</p> +```````````````````````````````` + + + +Rule 4: + +This is not emphasis, because the closing `_` is preceded by +whitespace: + +```````````````````````````````` example +_foo bar _ +. +<p>_foo bar _</p> +```````````````````````````````` + + +This is not emphasis, because the second `_` is +preceded by punctuation and followed by an alphanumeric: + +```````````````````````````````` example +_(_foo) +. +<p>_(_foo)</p> +```````````````````````````````` + + +This is emphasis within emphasis: + +```````````````````````````````` example +_(_foo_)_ +. +<p><em>(<em>foo</em>)</em></p> +```````````````````````````````` + + +Intraword emphasis is disallowed for `_`: + +```````````````````````````````` example +_foo_bar +. +<p>_foo_bar</p> +```````````````````````````````` + + +```````````````````````````````` example +_пристаням_стремятся +. +<p>_пристаням_стремятся</p> +```````````````````````````````` + + +```````````````````````````````` example +_foo_bar_baz_ +. +<p><em>foo_bar_baz</em></p> +```````````````````````````````` + + +This is emphasis, even though the closing delimiter is +both left- and right-flanking, because it is followed by +punctuation: + +```````````````````````````````` example +_(bar)_. +. +<p><em>(bar)</em>.</p> +```````````````````````````````` + + +Rule 5: + +```````````````````````````````` example +**foo bar** +. +<p><strong>foo bar</strong></p> +```````````````````````````````` + + +This is not strong emphasis, because the opening delimiter is +followed by whitespace: + +```````````````````````````````` example +** foo bar** +. +<p>** foo bar**</p> +```````````````````````````````` + + +This is not strong emphasis, because the opening `**` is preceded +by an alphanumeric and followed by punctuation, and hence +not part of a [left-flanking delimiter run]: + +```````````````````````````````` example +a**"foo"** +. +<p>a**"foo"**</p> +```````````````````````````````` + + +Intraword strong emphasis with `**` is permitted: + +```````````````````````````````` example +foo**bar** +. +<p>foo<strong>bar</strong></p> +```````````````````````````````` + + +Rule 6: + +```````````````````````````````` example +__foo bar__ +. +<p><strong>foo bar</strong></p> +```````````````````````````````` + + +This is not strong emphasis, because the opening delimiter is +followed by whitespace: + +```````````````````````````````` example +__ foo bar__ +. +<p>__ foo bar__</p> +```````````````````````````````` + + +A newline counts as whitespace: +```````````````````````````````` example +__ +foo bar__ +. +<p>__ +foo bar__</p> +```````````````````````````````` + + +This is not strong emphasis, because the opening `__` is preceded +by an alphanumeric and followed by punctuation: + +```````````````````````````````` example +a__"foo"__ +. +<p>a__"foo"__</p> +```````````````````````````````` + + +Intraword strong emphasis is forbidden with `__`: + +```````````````````````````````` example +foo__bar__ +. +<p>foo__bar__</p> +```````````````````````````````` + + +```````````````````````````````` example +5__6__78 +. +<p>5__6__78</p> +```````````````````````````````` + + +```````````````````````````````` example +пристаням__стремятся__ +. +<p>пристаням__стремятся__</p> +```````````````````````````````` + + +```````````````````````````````` example +__foo, __bar__, baz__ +. +<p><strong>foo, <strong>bar</strong>, baz</strong></p> +```````````````````````````````` + + +This is strong emphasis, even though the opening delimiter is +both left- and right-flanking, because it is preceded by +punctuation: + +```````````````````````````````` example +foo-__(bar)__ +. +<p>foo-<strong>(bar)</strong></p> +```````````````````````````````` + + + +Rule 7: + +This is not strong emphasis, because the closing delimiter is preceded +by whitespace: + +```````````````````````````````` example +**foo bar ** +. +<p>**foo bar **</p> +```````````````````````````````` + + +(Nor can it be interpreted as an emphasized `*foo bar *`, because of +Rule 11.) + +This is not strong emphasis, because the second `**` is +preceded by punctuation and followed by an alphanumeric: + +```````````````````````````````` example +**(**foo) +. +<p>**(**foo)</p> +```````````````````````````````` + + +The point of this restriction is more easily appreciated +with these examples: + +```````````````````````````````` example +*(**foo**)* +. +<p><em>(<strong>foo</strong>)</em></p> +```````````````````````````````` + + +```````````````````````````````` example +**Gomphocarpus (*Gomphocarpus physocarpus*, syn. +*Asclepias physocarpa*)** +. +<p><strong>Gomphocarpus (<em>Gomphocarpus physocarpus</em>, syn. +<em>Asclepias physocarpa</em>)</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +**foo "*bar*" foo** +. +<p><strong>foo "<em>bar</em>" foo</strong></p> +```````````````````````````````` + + +Intraword emphasis: + +```````````````````````````````` example +**foo**bar +. +<p><strong>foo</strong>bar</p> +```````````````````````````````` + + +Rule 8: + +This is not strong emphasis, because the closing delimiter is +preceded by whitespace: + +```````````````````````````````` example +__foo bar __ +. +<p>__foo bar __</p> +```````````````````````````````` + + +This is not strong emphasis, because the second `__` is +preceded by punctuation and followed by an alphanumeric: + +```````````````````````````````` example +__(__foo) +. +<p>__(__foo)</p> +```````````````````````````````` + + +The point of this restriction is more easily appreciated +with this example: + +```````````````````````````````` example +_(__foo__)_ +. +<p><em>(<strong>foo</strong>)</em></p> +```````````````````````````````` + + +Intraword strong emphasis is forbidden with `__`: + +```````````````````````````````` example +__foo__bar +. +<p>__foo__bar</p> +```````````````````````````````` + + +```````````````````````````````` example +__пристаням__стремятся +. +<p>__пристаням__стремятся</p> +```````````````````````````````` + + +```````````````````````````````` example +__foo__bar__baz__ +. +<p><strong>foo__bar__baz</strong></p> +```````````````````````````````` + + +This is strong emphasis, even though the closing delimiter is +both left- and right-flanking, because it is followed by +punctuation: + +```````````````````````````````` example +__(bar)__. +. +<p><strong>(bar)</strong>.</p> +```````````````````````````````` + + +Rule 9: + +Any nonempty sequence of inline elements can be the contents of an +emphasized span. + +```````````````````````````````` example +*foo [bar](/url)* +. +<p><em>foo <a href="/url">bar</a></em></p> +```````````````````````````````` + + +```````````````````````````````` example +*foo +bar* +. +<p><em>foo +bar</em></p> +```````````````````````````````` + + +In particular, emphasis and strong emphasis can be nested +inside emphasis: + +```````````````````````````````` example +_foo __bar__ baz_ +. +<p><em>foo <strong>bar</strong> baz</em></p> +```````````````````````````````` + + +```````````````````````````````` example +_foo _bar_ baz_ +. +<p><em>foo <em>bar</em> baz</em></p> +```````````````````````````````` + + +```````````````````````````````` example +__foo_ bar_ +. +<p><em><em>foo</em> bar</em></p> +```````````````````````````````` + + +```````````````````````````````` example +*foo *bar** +. +<p><em>foo <em>bar</em></em></p> +```````````````````````````````` + + +```````````````````````````````` example +*foo **bar** baz* +. +<p><em>foo <strong>bar</strong> baz</em></p> +```````````````````````````````` + +```````````````````````````````` example +*foo**bar**baz* +. +<p><em>foo<strong>bar</strong>baz</em></p> +```````````````````````````````` + +Note that in the preceding case, the interpretation + +``` markdown +<p><em>foo</em><em>bar<em></em>baz</em></p> +``` + + +is precluded by the condition that a delimiter that +can both open and close (like the `*` after `foo`) +cannot form emphasis if the sum of the lengths of +the delimiter runs containing the opening and +closing delimiters is a multiple of 3 unless +both lengths are multiples of 3. + + +For the same reason, we don't get two consecutive +emphasis sections in this example: + +```````````````````````````````` example +*foo**bar* +. +<p><em>foo**bar</em></p> +```````````````````````````````` + + +The same condition ensures that the following +cases are all strong emphasis nested inside +emphasis, even when the interior spaces are +omitted: + + +```````````````````````````````` example +***foo** bar* +. +<p><em><strong>foo</strong> bar</em></p> +```````````````````````````````` + + +```````````````````````````````` example +*foo **bar*** +. +<p><em>foo <strong>bar</strong></em></p> +```````````````````````````````` + + +```````````````````````````````` example +*foo**bar*** +. +<p><em>foo<strong>bar</strong></em></p> +```````````````````````````````` + + +When the lengths of the interior closing and opening +delimiter runs are *both* multiples of 3, though, +they can match to create emphasis: + +```````````````````````````````` example +foo***bar***baz +. +<p>foo<em><strong>bar</strong></em>baz</p> +```````````````````````````````` + +```````````````````````````````` example +foo******bar*********baz +. +<p>foo<strong><strong><strong>bar</strong></strong></strong>***baz</p> +```````````````````````````````` + + +Indefinite levels of nesting are possible: + +```````````````````````````````` example +*foo **bar *baz* bim** bop* +. +<p><em>foo <strong>bar <em>baz</em> bim</strong> bop</em></p> +```````````````````````````````` + + +```````````````````````````````` example +*foo [*bar*](/url)* +. +<p><em>foo <a href="/url"><em>bar</em></a></em></p> +```````````````````````````````` + + +There can be no empty emphasis or strong emphasis: + +```````````````````````````````` example +** is not an empty emphasis +. +<p>** is not an empty emphasis</p> +```````````````````````````````` + + +```````````````````````````````` example +**** is not an empty strong emphasis +. +<p>**** is not an empty strong emphasis</p> +```````````````````````````````` + + + +Rule 10: + +Any nonempty sequence of inline elements can be the contents of an +strongly emphasized span. + +```````````````````````````````` example +**foo [bar](/url)** +. +<p><strong>foo <a href="/url">bar</a></strong></p> +```````````````````````````````` + + +```````````````````````````````` example +**foo +bar** +. +<p><strong>foo +bar</strong></p> +```````````````````````````````` + + +In particular, emphasis and strong emphasis can be nested +inside strong emphasis: + +```````````````````````````````` example +__foo _bar_ baz__ +. +<p><strong>foo <em>bar</em> baz</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +__foo __bar__ baz__ +. +<p><strong>foo <strong>bar</strong> baz</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +____foo__ bar__ +. +<p><strong><strong>foo</strong> bar</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +**foo **bar**** +. +<p><strong>foo <strong>bar</strong></strong></p> +```````````````````````````````` + + +```````````````````````````````` example +**foo *bar* baz** +. +<p><strong>foo <em>bar</em> baz</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +**foo*bar*baz** +. +<p><strong>foo<em>bar</em>baz</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +***foo* bar** +. +<p><strong><em>foo</em> bar</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +**foo *bar*** +. +<p><strong>foo <em>bar</em></strong></p> +```````````````````````````````` + + +Indefinite levels of nesting are possible: + +```````````````````````````````` example +**foo *bar **baz** +bim* bop** +. +<p><strong>foo <em>bar <strong>baz</strong> +bim</em> bop</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +**foo [*bar*](/url)** +. +<p><strong>foo <a href="/url"><em>bar</em></a></strong></p> +```````````````````````````````` + + +There can be no empty emphasis or strong emphasis: + +```````````````````````````````` example +__ is not an empty emphasis +. +<p>__ is not an empty emphasis</p> +```````````````````````````````` + + +```````````````````````````````` example +____ is not an empty strong emphasis +. +<p>____ is not an empty strong emphasis</p> +```````````````````````````````` + + + +Rule 11: + +```````````````````````````````` example +foo *** +. +<p>foo ***</p> +```````````````````````````````` + + +```````````````````````````````` example +foo *\** +. +<p>foo <em>*</em></p> +```````````````````````````````` + + +```````````````````````````````` example +foo *_* +. +<p>foo <em>_</em></p> +```````````````````````````````` + + +```````````````````````````````` example +foo ***** +. +<p>foo *****</p> +```````````````````````````````` + + +```````````````````````````````` example +foo **\*** +. +<p>foo <strong>*</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +foo **_** +. +<p>foo <strong>_</strong></p> +```````````````````````````````` + + +Note that when delimiters do not match evenly, Rule 11 determines +that the excess literal `*` characters will appear outside of the +emphasis, rather than inside it: + +```````````````````````````````` example +**foo* +. +<p>*<em>foo</em></p> +```````````````````````````````` + + +```````````````````````````````` example +*foo** +. +<p><em>foo</em>*</p> +```````````````````````````````` + + +```````````````````````````````` example +***foo** +. +<p>*<strong>foo</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +****foo* +. +<p>***<em>foo</em></p> +```````````````````````````````` + + +```````````````````````````````` example +**foo*** +. +<p><strong>foo</strong>*</p> +```````````````````````````````` + + +```````````````````````````````` example +*foo**** +. +<p><em>foo</em>***</p> +```````````````````````````````` + + + +Rule 12: + +```````````````````````````````` example +foo ___ +. +<p>foo ___</p> +```````````````````````````````` + + +```````````````````````````````` example +foo _\__ +. +<p>foo <em>_</em></p> +```````````````````````````````` + + +```````````````````````````````` example +foo _*_ +. +<p>foo <em>*</em></p> +```````````````````````````````` + + +```````````````````````````````` example +foo _____ +. +<p>foo _____</p> +```````````````````````````````` + + +```````````````````````````````` example +foo __\___ +. +<p>foo <strong>_</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +foo __*__ +. +<p>foo <strong>*</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +__foo_ +. +<p>_<em>foo</em></p> +```````````````````````````````` + + +Note that when delimiters do not match evenly, Rule 12 determines +that the excess literal `_` characters will appear outside of the +emphasis, rather than inside it: + +```````````````````````````````` example +_foo__ +. +<p><em>foo</em>_</p> +```````````````````````````````` + + +```````````````````````````````` example +___foo__ +. +<p>_<strong>foo</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +____foo_ +. +<p>___<em>foo</em></p> +```````````````````````````````` + + +```````````````````````````````` example +__foo___ +. +<p><strong>foo</strong>_</p> +```````````````````````````````` + + +```````````````````````````````` example +_foo____ +. +<p><em>foo</em>___</p> +```````````````````````````````` + + +Rule 13 implies that if you want emphasis nested directly inside +emphasis, you must use different delimiters: + +```````````````````````````````` example +**foo** +. +<p><strong>foo</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +*_foo_* +. +<p><em><em>foo</em></em></p> +```````````````````````````````` + + +```````````````````````````````` example +__foo__ +. +<p><strong>foo</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +_*foo*_ +. +<p><em><em>foo</em></em></p> +```````````````````````````````` + + +However, strong emphasis within strong emphasis is possible without +switching delimiters: + +```````````````````````````````` example +****foo**** +. +<p><strong><strong>foo</strong></strong></p> +```````````````````````````````` + + +```````````````````````````````` example +____foo____ +. +<p><strong><strong>foo</strong></strong></p> +```````````````````````````````` + + + +Rule 13 can be applied to arbitrarily long sequences of +delimiters: + +```````````````````````````````` example +******foo****** +. +<p><strong><strong><strong>foo</strong></strong></strong></p> +```````````````````````````````` + + +Rule 14: + +```````````````````````````````` example +***foo*** +. +<p><em><strong>foo</strong></em></p> +```````````````````````````````` + + +```````````````````````````````` example +_____foo_____ +. +<p><em><strong><strong>foo</strong></strong></em></p> +```````````````````````````````` + + +Rule 15: + +```````````````````````````````` example +*foo _bar* baz_ +. +<p><em>foo _bar</em> baz_</p> +```````````````````````````````` + + +```````````````````````````````` example +*foo __bar *baz bim__ bam* +. +<p><em>foo <strong>bar *baz bim</strong> bam</em></p> +```````````````````````````````` + + +Rule 16: + +```````````````````````````````` example +**foo **bar baz** +. +<p>**foo <strong>bar baz</strong></p> +```````````````````````````````` + + +```````````````````````````````` example +*foo *bar baz* +. +<p>*foo <em>bar baz</em></p> +```````````````````````````````` + + +Rule 17: + +```````````````````````````````` example +*[bar*](/url) +. +<p>*<a href="/url">bar*</a></p> +```````````````````````````````` + + +```````````````````````````````` example +_foo [bar_](/url) +. +<p>_foo <a href="/url">bar_</a></p> +```````````````````````````````` + + +```````````````````````````````` example +*<img src="foo" title="*"/> +. +<p>*<img src="foo" title="*"/></p> +```````````````````````````````` + + +```````````````````````````````` example +**<a href="**"> +. +<p>**<a href="**"></p> +```````````````````````````````` + + +```````````````````````````````` example +__<a href="__"> +. +<p>__<a href="__"></p> +```````````````````````````````` + + +```````````````````````````````` example +*a `*`* +. +<p><em>a <code>*</code></em></p> +```````````````````````````````` + + +```````````````````````````````` example +_a `_`_ +. +<p><em>a <code>_</code></em></p> +```````````````````````````````` + + +```````````````````````````````` example +**a<http://foo.bar/?q=**> +. +<p>**a<a href="http://foo.bar/?q=**">http://foo.bar/?q=**</a></p> +```````````````````````````````` + + +```````````````````````````````` example +__a<http://foo.bar/?q=__> +. +<p>__a<a href="http://foo.bar/?q=__">http://foo.bar/?q=__</a></p> +```````````````````````````````` + + + +## Links + +A link contains [link text] (the visible text), a [link destination] +(the URI that is the link destination), and optionally a [link title]. +There are two basic kinds of links in Markdown. In [inline links] the +destination and title are given immediately after the link text. In +[reference links] the destination and title are defined elsewhere in +the document. + +A [link text](@) consists of a sequence of zero or more +inline elements enclosed by square brackets (`[` and `]`). The +following rules apply: + +- Links may not contain other links, at any level of nesting. If + multiple otherwise valid link definitions appear nested inside each + other, the inner-most definition is used. + +- Brackets are allowed in the [link text] only if (a) they + are backslash-escaped or (b) they appear as a matched pair of brackets, + with an open bracket `[`, a sequence of zero or more inlines, and + a close bracket `]`. + +- Backtick [code spans], [autolinks], and raw [HTML tags] bind more tightly + than the brackets in link text. Thus, for example, + `` [foo`]` `` could not be a link text, since the second `]` + is part of a code span. + +- The brackets in link text bind more tightly than markers for + [emphasis and strong emphasis]. Thus, for example, `*[foo*](url)` is a link. + +A [link destination](@) consists of either + +- a sequence of zero or more characters between an opening `<` and a + closing `>` that contains no line breaks or unescaped + `<` or `>` characters, or + +- a nonempty sequence of characters that does not start with + `<`, does not include ASCII space or control characters, and + includes parentheses only if (a) they are backslash-escaped or + (b) they are part of a balanced pair of unescaped parentheses. + (Implementations may impose limits on parentheses nesting to + avoid performance issues, but at least three levels of nesting + should be supported.) + +A [link title](@) consists of either + +- a sequence of zero or more characters between straight double-quote + characters (`"`), including a `"` character only if it is + backslash-escaped, or + +- a sequence of zero or more characters between straight single-quote + characters (`'`), including a `'` character only if it is + backslash-escaped, or + +- a sequence of zero or more characters between matching parentheses + (`(...)`), including a `(` or `)` character only if it is + backslash-escaped. + +Although [link titles] may span multiple lines, they may not contain +a [blank line]. + +An [inline link](@) consists of a [link text] followed immediately +by a left parenthesis `(`, optional [whitespace], an optional +[link destination], an optional [link title] separated from the link +destination by [whitespace], optional [whitespace], and a right +parenthesis `)`. The link's text consists of the inlines contained +in the [link text] (excluding the enclosing square brackets). +The link's URI consists of the link destination, excluding enclosing +`<...>` if present, with backslash-escapes in effect as described +above. The link's title consists of the link title, excluding its +enclosing delimiters, with backslash-escapes in effect as described +above. + +Here is a simple inline link: + +```````````````````````````````` example +[link](/uri "title") +. +<p><a href="/uri" title="title">link</a></p> +```````````````````````````````` + + +The title may be omitted: + +```````````````````````````````` example +[link](/uri) +. +<p><a href="/uri">link</a></p> +```````````````````````````````` + + +Both the title and the destination may be omitted: + +```````````````````````````````` example +[link]() +. +<p><a href="">link</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[link](<>) +. +<p><a href="">link</a></p> +```````````````````````````````` + +The destination can only contain spaces if it is +enclosed in pointy brackets: + +```````````````````````````````` example +[link](/my uri) +. +<p>[link](/my uri)</p> +```````````````````````````````` + +```````````````````````````````` example +[link](</my uri>) +. +<p><a href="/my%20uri">link</a></p> +```````````````````````````````` + +The destination cannot contain line breaks, +even if enclosed in pointy brackets: + +```````````````````````````````` example +[link](foo +bar) +. +<p>[link](foo +bar)</p> +```````````````````````````````` + +```````````````````````````````` example +[link](<foo +bar>) +. +<p>[link](<foo +bar>)</p> +```````````````````````````````` + +The destination can contain `)` if it is enclosed +in pointy brackets: + +```````````````````````````````` example +[a](<b)c>) +. +<p><a href="b)c">a</a></p> +```````````````````````````````` + +Pointy brackets that enclose links must be unescaped: + +```````````````````````````````` example +[link](<foo\>) +. +<p>[link](<foo>)</p> +```````````````````````````````` + +These are not links, because the opening pointy bracket +is not matched properly: + +```````````````````````````````` example +[a](<b)c +[a](<b)c> +[a](<b>c) +. +<p>[a](<b)c +[a](<b)c> +[a](<b>c)</p> +```````````````````````````````` + +Parentheses inside the link destination may be escaped: + +```````````````````````````````` example +[link](\(foo\)) +. +<p><a href="(foo)">link</a></p> +```````````````````````````````` + +Any number of parentheses are allowed without escaping, as long as they are +balanced: + +```````````````````````````````` example +[link](foo(and(bar))) +. +<p><a href="foo(and(bar))">link</a></p> +```````````````````````````````` + +However, if you have unbalanced parentheses, you need to escape or use the +`<...>` form: + +```````````````````````````````` example +[link](foo\(and\(bar\)) +. +<p><a href="foo(and(bar)">link</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[link](<foo(and(bar)>) +. +<p><a href="foo(and(bar)">link</a></p> +```````````````````````````````` + + +Parentheses and other symbols can also be escaped, as usual +in Markdown: + +```````````````````````````````` example +[link](foo\)\:) +. +<p><a href="foo):">link</a></p> +```````````````````````````````` + + +A link can contain fragment identifiers and queries: + +```````````````````````````````` example +[link](#fragment) + +[link](http://example.com#fragment) + +[link](http://example.com?foo=3#frag) +. +<p><a href="#fragment">link</a></p> +<p><a href="http://example.com#fragment">link</a></p> +<p><a href="http://example.com?foo=3#frag">link</a></p> +```````````````````````````````` + + +Note that a backslash before a non-escapable character is +just a backslash: + +```````````````````````````````` example +[link](foo\bar) +. +<p><a href="foo%5Cbar">link</a></p> +```````````````````````````````` + + +URL-escaping should be left alone inside the destination, as all +URL-escaped characters are also valid URL characters. Entity and +numerical character references in the destination will be parsed +into the corresponding Unicode code points, as usual. These may +be optionally URL-escaped when written as HTML, but this spec +does not enforce any particular policy for rendering URLs in +HTML or other formats. Renderers may make different decisions +about how to escape or normalize URLs in the output. + +```````````````````````````````` example +[link](foo%20bä) +. +<p><a href="foo%20b%C3%A4">link</a></p> +```````````````````````````````` + + +Note that, because titles can often be parsed as destinations, +if you try to omit the destination and keep the title, you'll +get unexpected results: + +```````````````````````````````` example +[link]("title") +. +<p><a href="%22title%22">link</a></p> +```````````````````````````````` + + +Titles may be in single quotes, double quotes, or parentheses: + +```````````````````````````````` example +[link](/url "title") +[link](/url 'title') +[link](/url (title)) +. +<p><a href="/url" title="title">link</a> +<a href="/url" title="title">link</a> +<a href="/url" title="title">link</a></p> +```````````````````````````````` + + +Backslash escapes and entity and numeric character references +may be used in titles: + +```````````````````````````````` example +[link](/url "title \""") +. +<p><a href="/url" title="title """>link</a></p> +```````````````````````````````` + + +Titles must be separated from the link using a [whitespace]. +Other [Unicode whitespace] like non-breaking space doesn't work. + +```````````````````````````````` example +[link](/url "title") +. +<p><a href="/url%C2%A0%22title%22">link</a></p> +```````````````````````````````` + + +Nested balanced quotes are not allowed without escaping: + +```````````````````````````````` example +[link](/url "title "and" title") +. +<p>[link](/url "title "and" title")</p> +```````````````````````````````` + + +But it is easy to work around this by using a different quote type: + +```````````````````````````````` example +[link](/url 'title "and" title') +. +<p><a href="/url" title="title "and" title">link</a></p> +```````````````````````````````` + + +(Note: `Markdown.pl` did allow double quotes inside a double-quoted +title, and its test suite included a test demonstrating this. +But it is hard to see a good rationale for the extra complexity this +brings, since there are already many ways---backslash escaping, +entity and numeric character references, or using a different +quote type for the enclosing title---to write titles containing +double quotes. `Markdown.pl`'s handling of titles has a number +of other strange features. For example, it allows single-quoted +titles in inline links, but not reference links. And, in +reference links but not inline links, it allows a title to begin +with `"` and end with `)`. `Markdown.pl` 1.0.1 even allows +titles with no closing quotation mark, though 1.0.2b8 does not. +It seems preferable to adopt a simple, rational rule that works +the same way in inline links and link reference definitions.) + +[Whitespace] is allowed around the destination and title: + +```````````````````````````````` example +[link]( /uri + "title" ) +. +<p><a href="/uri" title="title">link</a></p> +```````````````````````````````` + + +But it is not allowed between the link text and the +following parenthesis: + +```````````````````````````````` example +[link] (/uri) +. +<p>[link] (/uri)</p> +```````````````````````````````` + + +The link text may contain balanced brackets, but not unbalanced ones, +unless they are escaped: + +```````````````````````````````` example +[link [foo [bar]]](/uri) +. +<p><a href="/uri">link [foo [bar]]</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[link] bar](/uri) +. +<p>[link] bar](/uri)</p> +```````````````````````````````` + + +```````````````````````````````` example +[link [bar](/uri) +. +<p>[link <a href="/uri">bar</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[link \[bar](/uri) +. +<p><a href="/uri">link [bar</a></p> +```````````````````````````````` + + +The link text may contain inline content: + +```````````````````````````````` example +[link *foo **bar** `#`*](/uri) +. +<p><a href="/uri">link <em>foo <strong>bar</strong> <code>#</code></em></a></p> +```````````````````````````````` + + +```````````````````````````````` example +[![moon](moon.jpg)](/uri) +. +<p><a href="/uri"><img src="moon.jpg" alt="moon" /></a></p> +```````````````````````````````` + + +However, links may not contain other links, at any level of nesting. + +```````````````````````````````` example +[foo [bar](/uri)](/uri) +. +<p>[foo <a href="/uri">bar</a>](/uri)</p> +```````````````````````````````` + + +```````````````````````````````` example +[foo *[bar [baz](/uri)](/uri)*](/uri) +. +<p>[foo <em>[bar <a href="/uri">baz</a>](/uri)</em>](/uri)</p> +```````````````````````````````` + + +```````````````````````````````` example +![[[foo](uri1)](uri2)](uri3) +. +<p><img src="uri3" alt="[foo](uri2)" /></p> +```````````````````````````````` + + +These cases illustrate the precedence of link text grouping over +emphasis grouping: + +```````````````````````````````` example +*[foo*](/uri) +. +<p>*<a href="/uri">foo*</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[foo *bar](baz*) +. +<p><a href="baz*">foo *bar</a></p> +```````````````````````````````` + + +Note that brackets that *aren't* part of links do not take +precedence: + +```````````````````````````````` example +*foo [bar* baz] +. +<p><em>foo [bar</em> baz]</p> +```````````````````````````````` + + +These cases illustrate the precedence of HTML tags, code spans, +and autolinks over link grouping: + +```````````````````````````````` example +[foo <bar attr="](baz)"> +. +<p>[foo <bar attr="](baz)"></p> +```````````````````````````````` + + +```````````````````````````````` example +[foo`](/uri)` +. +<p>[foo<code>](/uri)</code></p> +```````````````````````````````` + + +```````````````````````````````` example +[foo<http://example.com/?search=](uri)> +. +<p>[foo<a href="http://example.com/?search=%5D(uri)">http://example.com/?search=](uri)</a></p> +```````````````````````````````` + + +There are three kinds of [reference link](@)s: +[full](#full-reference-link), [collapsed](#collapsed-reference-link), +and [shortcut](#shortcut-reference-link). + +A [full reference link](@) +consists of a [link text] immediately followed by a [link label] +that [matches] a [link reference definition] elsewhere in the document. + +A [link label](@) begins with a left bracket (`[`) and ends +with the first right bracket (`]`) that is not backslash-escaped. +Between these brackets there must be at least one [non-whitespace character]. +Unescaped square bracket characters are not allowed inside the +opening and closing square brackets of [link labels]. A link +label can have at most 999 characters inside the square +brackets. + +One label [matches](@) +another just in case their normalized forms are equal. To normalize a +label, strip off the opening and closing brackets, +perform the *Unicode case fold*, strip leading and trailing +[whitespace] and collapse consecutive internal +[whitespace] to a single space. If there are multiple +matching reference link definitions, the one that comes first in the +document is used. (It is desirable in such cases to emit a warning.) + +The contents of the first link label are parsed as inlines, which are +used as the link's text. The link's URI and title are provided by the +matching [link reference definition]. + +Here is a simple example: + +```````````````````````````````` example +[foo][bar] + +[bar]: /url "title" +. +<p><a href="/url" title="title">foo</a></p> +```````````````````````````````` + + +The rules for the [link text] are the same as with +[inline links]. Thus: + +The link text may contain balanced brackets, but not unbalanced ones, +unless they are escaped: + +```````````````````````````````` example +[link [foo [bar]]][ref] + +[ref]: /uri +. +<p><a href="/uri">link [foo [bar]]</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[link \[bar][ref] + +[ref]: /uri +. +<p><a href="/uri">link [bar</a></p> +```````````````````````````````` + + +The link text may contain inline content: + +```````````````````````````````` example +[link *foo **bar** `#`*][ref] + +[ref]: /uri +. +<p><a href="/uri">link <em>foo <strong>bar</strong> <code>#</code></em></a></p> +```````````````````````````````` + + +```````````````````````````````` example +[![moon](moon.jpg)][ref] + +[ref]: /uri +. +<p><a href="/uri"><img src="moon.jpg" alt="moon" /></a></p> +```````````````````````````````` + + +However, links may not contain other links, at any level of nesting. + +```````````````````````````````` example +[foo [bar](/uri)][ref] + +[ref]: /uri +. +<p>[foo <a href="/uri">bar</a>]<a href="/uri">ref</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[foo *bar [baz][ref]*][ref] + +[ref]: /uri +. +<p>[foo <em>bar <a href="/uri">baz</a></em>]<a href="/uri">ref</a></p> +```````````````````````````````` + + +(In the examples above, we have two [shortcut reference links] +instead of one [full reference link].) + +The following cases illustrate the precedence of link text grouping over +emphasis grouping: + +```````````````````````````````` example +*[foo*][ref] + +[ref]: /uri +. +<p>*<a href="/uri">foo*</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[foo *bar][ref] + +[ref]: /uri +. +<p><a href="/uri">foo *bar</a></p> +```````````````````````````````` + + +These cases illustrate the precedence of HTML tags, code spans, +and autolinks over link grouping: + +```````````````````````````````` example +[foo <bar attr="][ref]"> + +[ref]: /uri +. +<p>[foo <bar attr="][ref]"></p> +```````````````````````````````` + + +```````````````````````````````` example +[foo`][ref]` + +[ref]: /uri +. +<p>[foo<code>][ref]</code></p> +```````````````````````````````` + + +```````````````````````````````` example +[foo<http://example.com/?search=][ref]> + +[ref]: /uri +. +<p>[foo<a href="http://example.com/?search=%5D%5Bref%5D">http://example.com/?search=][ref]</a></p> +```````````````````````````````` + + +Matching is case-insensitive: + +```````````````````````````````` example +[foo][BaR] + +[bar]: /url "title" +. +<p><a href="/url" title="title">foo</a></p> +```````````````````````````````` + + +Unicode case fold is used: + +```````````````````````````````` example +[Толпой][Толпой] is a Russian word. + +[ТОЛПОЙ]: /url +. +<p><a href="/url">Толпой</a> is a Russian word.</p> +```````````````````````````````` + + +Consecutive internal [whitespace] is treated as one space for +purposes of determining matching: + +```````````````````````````````` example +[Foo + bar]: /url + +[Baz][Foo bar] +. +<p><a href="/url">Baz</a></p> +```````````````````````````````` + + +No [whitespace] is allowed between the [link text] and the +[link label]: + +```````````````````````````````` example +[foo] [bar] + +[bar]: /url "title" +. +<p>[foo] <a href="/url" title="title">bar</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[foo] +[bar] + +[bar]: /url "title" +. +<p>[foo] +<a href="/url" title="title">bar</a></p> +```````````````````````````````` + + +This is a departure from John Gruber's original Markdown syntax +description, which explicitly allows whitespace between the link +text and the link label. It brings reference links in line with +[inline links], which (according to both original Markdown and +this spec) cannot have whitespace after the link text. More +importantly, it prevents inadvertent capture of consecutive +[shortcut reference links]. If whitespace is allowed between the +link text and the link label, then in the following we will have +a single reference link, not two shortcut reference links, as +intended: + +``` markdown +[foo] +[bar] + +[foo]: /url1 +[bar]: /url2 +``` + +(Note that [shortcut reference links] were introduced by Gruber +himself in a beta version of `Markdown.pl`, but never included +in the official syntax description. Without shortcut reference +links, it is harmless to allow space between the link text and +link label; but once shortcut references are introduced, it is +too dangerous to allow this, as it frequently leads to +unintended results.) + +When there are multiple matching [link reference definitions], +the first is used: + +```````````````````````````````` example +[foo]: /url1 + +[foo]: /url2 + +[bar][foo] +. +<p><a href="/url1">bar</a></p> +```````````````````````````````` + + +Note that matching is performed on normalized strings, not parsed +inline content. So the following does not match, even though the +labels define equivalent inline content: + +```````````````````````````````` example +[bar][foo\!] + +[foo!]: /url +. +<p>[bar][foo!]</p> +```````````````````````````````` + + +[Link labels] cannot contain brackets, unless they are +backslash-escaped: + +```````````````````````````````` example +[foo][ref[] + +[ref[]: /uri +. +<p>[foo][ref[]</p> +<p>[ref[]: /uri</p> +```````````````````````````````` + + +```````````````````````````````` example +[foo][ref[bar]] + +[ref[bar]]: /uri +. +<p>[foo][ref[bar]]</p> +<p>[ref[bar]]: /uri</p> +```````````````````````````````` + + +```````````````````````````````` example +[[[foo]]] + +[[[foo]]]: /url +. +<p>[[[foo]]]</p> +<p>[[[foo]]]: /url</p> +```````````````````````````````` + + +```````````````````````````````` example +[foo][ref\[] + +[ref\[]: /uri +. +<p><a href="/uri">foo</a></p> +```````````````````````````````` + + +Note that in this example `]` is not backslash-escaped: + +```````````````````````````````` example +[bar\\]: /uri + +[bar\\] +. +<p><a href="/uri">bar\</a></p> +```````````````````````````````` + + +A [link label] must contain at least one [non-whitespace character]: + +```````````````````````````````` example +[] + +[]: /uri +. +<p>[]</p> +<p>[]: /uri</p> +```````````````````````````````` + + +```````````````````````````````` example +[ + ] + +[ + ]: /uri +. +<p>[ +]</p> +<p>[ +]: /uri</p> +```````````````````````````````` + + +A [collapsed reference link](@) +consists of a [link label] that [matches] a +[link reference definition] elsewhere in the +document, followed by the string `[]`. +The contents of the first link label are parsed as inlines, +which are used as the link's text. The link's URI and title are +provided by the matching reference link definition. Thus, +`[foo][]` is equivalent to `[foo][foo]`. + +```````````````````````````````` example +[foo][] + +[foo]: /url "title" +. +<p><a href="/url" title="title">foo</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[*foo* bar][] + +[*foo* bar]: /url "title" +. +<p><a href="/url" title="title"><em>foo</em> bar</a></p> +```````````````````````````````` + + +The link labels are case-insensitive: + +```````````````````````````````` example +[Foo][] + +[foo]: /url "title" +. +<p><a href="/url" title="title">Foo</a></p> +```````````````````````````````` + + + +As with full reference links, [whitespace] is not +allowed between the two sets of brackets: + +```````````````````````````````` example +[foo] +[] + +[foo]: /url "title" +. +<p><a href="/url" title="title">foo</a> +[]</p> +```````````````````````````````` + + +A [shortcut reference link](@) +consists of a [link label] that [matches] a +[link reference definition] elsewhere in the +document and is not followed by `[]` or a link label. +The contents of the first link label are parsed as inlines, +which are used as the link's text. The link's URI and title +are provided by the matching link reference definition. +Thus, `[foo]` is equivalent to `[foo][]`. + +```````````````````````````````` example +[foo] + +[foo]: /url "title" +. +<p><a href="/url" title="title">foo</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[*foo* bar] + +[*foo* bar]: /url "title" +. +<p><a href="/url" title="title"><em>foo</em> bar</a></p> +```````````````````````````````` + + +```````````````````````````````` example +[[*foo* bar]] + +[*foo* bar]: /url "title" +. +<p>[<a href="/url" title="title"><em>foo</em> bar</a>]</p> +```````````````````````````````` + + +```````````````````````````````` example +[[bar [foo] + +[foo]: /url +. +<p>[[bar <a href="/url">foo</a></p> +```````````````````````````````` + + +The link labels are case-insensitive: + +```````````````````````````````` example +[Foo] + +[foo]: /url "title" +. +<p><a href="/url" title="title">Foo</a></p> +```````````````````````````````` + + +A space after the link text should be preserved: + +```````````````````````````````` example +[foo] bar + +[foo]: /url +. +<p><a href="/url">foo</a> bar</p> +```````````````````````````````` + + +If you just want bracketed text, you can backslash-escape the +opening bracket to avoid links: + +```````````````````````````````` example +\[foo] + +[foo]: /url "title" +. +<p>[foo]</p> +```````````````````````````````` + + +Note that this is a link, because a link label ends with the first +following closing bracket: + +```````````````````````````````` example +[foo*]: /url + +*[foo*] +. +<p>*<a href="/url">foo*</a></p> +```````````````````````````````` + + +Full and compact references take precedence over shortcut +references: + +```````````````````````````````` example +[foo][bar] + +[foo]: /url1 +[bar]: /url2 +. +<p><a href="/url2">foo</a></p> +```````````````````````````````` + +```````````````````````````````` example +[foo][] + +[foo]: /url1 +. +<p><a href="/url1">foo</a></p> +```````````````````````````````` + +Inline links also take precedence: + +```````````````````````````````` example +[foo]() + +[foo]: /url1 +. +<p><a href="">foo</a></p> +```````````````````````````````` + +```````````````````````````````` example +[foo](not a link) + +[foo]: /url1 +. +<p><a href="/url1">foo</a>(not a link)</p> +```````````````````````````````` + +In the following case `[bar][baz]` is parsed as a reference, +`[foo]` as normal text: + +```````````````````````````````` example +[foo][bar][baz] + +[baz]: /url +. +<p>[foo]<a href="/url">bar</a></p> +```````````````````````````````` + + +Here, though, `[foo][bar]` is parsed as a reference, since +`[bar]` is defined: + +```````````````````````````````` example +[foo][bar][baz] + +[baz]: /url1 +[bar]: /url2 +. +<p><a href="/url2">foo</a><a href="/url1">baz</a></p> +```````````````````````````````` + + +Here `[foo]` is not parsed as a shortcut reference, because it +is followed by a link label (even though `[bar]` is not defined): + +```````````````````````````````` example +[foo][bar][baz] + +[baz]: /url1 +[foo]: /url2 +. +<p>[foo]<a href="/url1">bar</a></p> +```````````````````````````````` + + + +## Images + +Syntax for images is like the syntax for links, with one +difference. Instead of [link text], we have an +[image description](@). The rules for this are the +same as for [link text], except that (a) an +image description starts with `![` rather than `[`, and +(b) an image description may contain links. +An image description has inline elements +as its contents. When an image is rendered to HTML, +this is standardly used as the image's `alt` attribute. + +```````````````````````````````` example +![foo](/url "title") +. +<p><img src="/url" alt="foo" title="title" /></p> +```````````````````````````````` + + +```````````````````````````````` example +![foo *bar*] + +[foo *bar*]: train.jpg "train & tracks" +. +<p><img src="train.jpg" alt="foo bar" title="train & tracks" /></p> +```````````````````````````````` + + +```````````````````````````````` example +![foo ![bar](/url)](/url2) +. +<p><img src="/url2" alt="foo bar" /></p> +```````````````````````````````` + + +```````````````````````````````` example +![foo [bar](/url)](/url2) +. +<p><img src="/url2" alt="foo bar" /></p> +```````````````````````````````` + + +Though this spec is concerned with parsing, not rendering, it is +recommended that in rendering to HTML, only the plain string content +of the [image description] be used. Note that in +the above example, the alt attribute's value is `foo bar`, not `foo +[bar](/url)` or `foo <a href="/url">bar</a>`. Only the plain string +content is rendered, without formatting. + +```````````````````````````````` example +![foo *bar*][] + +[foo *bar*]: train.jpg "train & tracks" +. +<p><img src="train.jpg" alt="foo bar" title="train & tracks" /></p> +```````````````````````````````` + + +```````````````````````````````` example +![foo *bar*][foobar] + +[FOOBAR]: train.jpg "train & tracks" +. +<p><img src="train.jpg" alt="foo bar" title="train & tracks" /></p> +```````````````````````````````` + + +```````````````````````````````` example +![foo](train.jpg) +. +<p><img src="train.jpg" alt="foo" /></p> +```````````````````````````````` + + +```````````````````````````````` example +My ![foo bar](/path/to/train.jpg "title" ) +. +<p>My <img src="/path/to/train.jpg" alt="foo bar" title="title" /></p> +```````````````````````````````` + + +```````````````````````````````` example +![foo](<url>) +. +<p><img src="url" alt="foo" /></p> +```````````````````````````````` + + +```````````````````````````````` example +![](/url) +. +<p><img src="/url" alt="" /></p> +```````````````````````````````` + + +Reference-style: + +```````````````````````````````` example +![foo][bar] + +[bar]: /url +. +<p><img src="/url" alt="foo" /></p> +```````````````````````````````` + + +```````````````````````````````` example +![foo][bar] + +[BAR]: /url +. +<p><img src="/url" alt="foo" /></p> +```````````````````````````````` + + +Collapsed: + +```````````````````````````````` example +![foo][] + +[foo]: /url "title" +. +<p><img src="/url" alt="foo" title="title" /></p> +```````````````````````````````` + + +```````````````````````````````` example +![*foo* bar][] + +[*foo* bar]: /url "title" +. +<p><img src="/url" alt="foo bar" title="title" /></p> +```````````````````````````````` + + +The labels are case-insensitive: + +```````````````````````````````` example +![Foo][] + +[foo]: /url "title" +. +<p><img src="/url" alt="Foo" title="title" /></p> +```````````````````````````````` + + +As with reference links, [whitespace] is not allowed +between the two sets of brackets: + +```````````````````````````````` example +![foo] +[] + +[foo]: /url "title" +. +<p><img src="/url" alt="foo" title="title" /> +[]</p> +```````````````````````````````` + + +Shortcut: + +```````````````````````````````` example +![foo] + +[foo]: /url "title" +. +<p><img src="/url" alt="foo" title="title" /></p> +```````````````````````````````` + + +```````````````````````````````` example +![*foo* bar] + +[*foo* bar]: /url "title" +. +<p><img src="/url" alt="foo bar" title="title" /></p> +```````````````````````````````` + + +Note that link labels cannot contain unescaped brackets: + +```````````````````````````````` example +![[foo]] + +[[foo]]: /url "title" +. +<p>![[foo]]</p> +<p>[[foo]]: /url "title"</p> +```````````````````````````````` + + +The link labels are case-insensitive: + +```````````````````````````````` example +![Foo] + +[foo]: /url "title" +. +<p><img src="/url" alt="Foo" title="title" /></p> +```````````````````````````````` + + +If you just want a literal `!` followed by bracketed text, you can +backslash-escape the opening `[`: + +```````````````````````````````` example +!\[foo] + +[foo]: /url "title" +. +<p>![foo]</p> +```````````````````````````````` + + +If you want a link after a literal `!`, backslash-escape the +`!`: + +```````````````````````````````` example +\![foo] + +[foo]: /url "title" +. +<p>!<a href="/url" title="title">foo</a></p> +```````````````````````````````` + + +## Autolinks + +[Autolink](@)s are absolute URIs and email addresses inside +`<` and `>`. They are parsed as links, with the URL or email address +as the link label. + +A [URI autolink](@) consists of `<`, followed by an +[absolute URI] followed by `>`. It is parsed as +a link to the URI, with the URI as the link's label. + +An [absolute URI](@), +for these purposes, consists of a [scheme] followed by a colon (`:`) +followed by zero or more characters other than ASCII +[whitespace] and control characters, `<`, and `>`. If +the URI includes these characters, they must be percent-encoded +(e.g. `%20` for a space). + +For purposes of this spec, a [scheme](@) is any sequence +of 2--32 characters beginning with an ASCII letter and followed +by any combination of ASCII letters, digits, or the symbols plus +("+"), period ("."), or hyphen ("-"). + +Here are some valid autolinks: + +```````````````````````````````` example +<http://foo.bar.baz> +. +<p><a href="http://foo.bar.baz">http://foo.bar.baz</a></p> +```````````````````````````````` + + +```````````````````````````````` example +<http://foo.bar.baz/test?q=hello&id=22&boolean> +. +<p><a href="http://foo.bar.baz/test?q=hello&id=22&boolean">http://foo.bar.baz/test?q=hello&id=22&boolean</a></p> +```````````````````````````````` + + +```````````````````````````````` example +<irc://foo.bar:2233/baz> +. +<p><a href="irc://foo.bar:2233/baz">irc://foo.bar:2233/baz</a></p> +```````````````````````````````` + + +Uppercase is also fine: + +```````````````````````````````` example +<MAILTO:FOO@BAR.BAZ> +. +<p><a href="MAILTO:FOO@BAR.BAZ">MAILTO:FOO@BAR.BAZ</a></p> +```````````````````````````````` + + +Note that many strings that count as [absolute URIs] for +purposes of this spec are not valid URIs, because their +schemes are not registered or because of other problems +with their syntax: + +```````````````````````````````` example +<a+b+c:d> +. +<p><a href="a+b+c:d">a+b+c:d</a></p> +```````````````````````````````` + + +```````````````````````````````` example +<made-up-scheme://foo,bar> +. +<p><a href="made-up-scheme://foo,bar">made-up-scheme://foo,bar</a></p> +```````````````````````````````` + + +```````````````````````````````` example +<http://../> +. +<p><a href="http://../">http://../</a></p> +```````````````````````````````` + + +```````````````````````````````` example +<localhost:5001/foo> +. +<p><a href="localhost:5001/foo">localhost:5001/foo</a></p> +```````````````````````````````` + + +Spaces are not allowed in autolinks: + +```````````````````````````````` example +<http://foo.bar/baz bim> +. +<p><http://foo.bar/baz bim></p> +```````````````````````````````` + + +Backslash-escapes do not work inside autolinks: + +```````````````````````````````` example +<http://example.com/\[\> +. +<p><a href="http://example.com/%5C%5B%5C">http://example.com/\[\</a></p> +```````````````````````````````` + + +An [email autolink](@) +consists of `<`, followed by an [email address], +followed by `>`. The link's label is the email address, +and the URL is `mailto:` followed by the email address. + +An [email address](@), +for these purposes, is anything that matches +the [non-normative regex from the HTML5 +spec](https://html.spec.whatwg.org/multipage/forms.html#e-mail-state-(type=email)): + + /^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])? + (?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$/ + +Examples of email autolinks: + +```````````````````````````````` example +<foo@bar.example.com> +. +<p><a href="mailto:foo@bar.example.com">foo@bar.example.com</a></p> +```````````````````````````````` + + +```````````````````````````````` example +<foo+special@Bar.baz-bar0.com> +. +<p><a href="mailto:foo+special@Bar.baz-bar0.com">foo+special@Bar.baz-bar0.com</a></p> +```````````````````````````````` + + +Backslash-escapes do not work inside email autolinks: + +```````````````````````````````` example +<foo\+@bar.example.com> +. +<p><foo+@bar.example.com></p> +```````````````````````````````` + + +These are not autolinks: + +```````````````````````````````` example +<> +. +<p><></p> +```````````````````````````````` + + +```````````````````````````````` example +< http://foo.bar > +. +<p>< http://foo.bar ></p> +```````````````````````````````` + + +```````````````````````````````` example +<m:abc> +. +<p><m:abc></p> +```````````````````````````````` + + +```````````````````````````````` example +<foo.bar.baz> +. +<p><foo.bar.baz></p> +```````````````````````````````` + + +```````````````````````````````` example +http://example.com +. +<p>http://example.com</p> +```````````````````````````````` + + +```````````````````````````````` example +foo@bar.example.com +. +<p>foo@bar.example.com</p> +```````````````````````````````` + + +## Raw HTML + +Text between `<` and `>` that looks like an HTML tag is parsed as a +raw HTML tag and will be rendered in HTML without escaping. +Tag and attribute names are not limited to current HTML tags, +so custom tags (and even, say, DocBook tags) may be used. + +Here is the grammar for tags: + +A [tag name](@) consists of an ASCII letter +followed by zero or more ASCII letters, digits, or +hyphens (`-`). + +An [attribute](@) consists of [whitespace], +an [attribute name], and an optional +[attribute value specification]. + +An [attribute name](@) +consists of an ASCII letter, `_`, or `:`, followed by zero or more ASCII +letters, digits, `_`, `.`, `:`, or `-`. (Note: This is the XML +specification restricted to ASCII. HTML5 is laxer.) + +An [attribute value specification](@) +consists of optional [whitespace], +a `=` character, optional [whitespace], and an [attribute +value]. + +An [attribute value](@) +consists of an [unquoted attribute value], +a [single-quoted attribute value], or a [double-quoted attribute value]. + +An [unquoted attribute value](@) +is a nonempty string of characters not +including [whitespace], `"`, `'`, `=`, `<`, `>`, or `` ` ``. + +A [single-quoted attribute value](@) +consists of `'`, zero or more +characters not including `'`, and a final `'`. + +A [double-quoted attribute value](@) +consists of `"`, zero or more +characters not including `"`, and a final `"`. + +An [open tag](@) consists of a `<` character, a [tag name], +zero or more [attributes], optional [whitespace], an optional `/` +character, and a `>` character. + +A [closing tag](@) consists of the string `</`, a +[tag name], optional [whitespace], and the character `>`. + +An [HTML comment](@) consists of `<!--` + *text* + `-->`, +where *text* does not start with `>` or `->`, does not end with `-`, +and does not contain `--`. (See the +[HTML5 spec](http://www.w3.org/TR/html5/syntax.html#comments).) + +A [processing instruction](@) +consists of the string `<?`, a string +of characters not including the string `?>`, and the string +`?>`. + +A [declaration](@) consists of the +string `<!`, a name consisting of one or more uppercase ASCII letters, +[whitespace], a string of characters not including the +character `>`, and the character `>`. + +A [CDATA section](@) consists of +the string `<![CDATA[`, a string of characters not including the string +`]]>`, and the string `]]>`. + +An [HTML tag](@) consists of an [open tag], a [closing tag], +an [HTML comment], a [processing instruction], a [declaration], +or a [CDATA section]. + +Here are some simple open tags: + +```````````````````````````````` example +<a><bab><c2c> +. +<p><a><bab><c2c></p> +```````````````````````````````` + + +Empty elements: + +```````````````````````````````` example +<a/><b2/> +. +<p><a/><b2/></p> +```````````````````````````````` + + +[Whitespace] is allowed: + +```````````````````````````````` example +<a /><b2 +data="foo" > +. +<p><a /><b2 +data="foo" ></p> +```````````````````````````````` + + +With attributes: + +```````````````````````````````` example +<a foo="bar" bam = 'baz <em>"</em>' +_boolean zoop:33=zoop:33 /> +. +<p><a foo="bar" bam = 'baz <em>"</em>' +_boolean zoop:33=zoop:33 /></p> +```````````````````````````````` + + +Custom tag names can be used: + +```````````````````````````````` example +Foo <responsive-image src="foo.jpg" /> +. +<p>Foo <responsive-image src="foo.jpg" /></p> +```````````````````````````````` + + +Illegal tag names, not parsed as HTML: + +```````````````````````````````` example +<33> <__> +. +<p><33> <__></p> +```````````````````````````````` + + +Illegal attribute names: + +```````````````````````````````` example +<a h*#ref="hi"> +. +<p><a h*#ref="hi"></p> +```````````````````````````````` + + +Illegal attribute values: + +```````````````````````````````` example +<a href="hi'> <a href=hi'> +. +<p><a href="hi'> <a href=hi'></p> +```````````````````````````````` + + +Illegal [whitespace]: + +```````````````````````````````` example +< a>< +foo><bar/ > +<foo bar=baz +bim!bop /> +. +<p>< a>< +foo><bar/ > +<foo bar=baz +bim!bop /></p> +```````````````````````````````` + + +Missing [whitespace]: + +```````````````````````````````` example +<a href='bar'title=title> +. +<p><a href='bar'title=title></p> +```````````````````````````````` + + +Closing tags: + +```````````````````````````````` example +</a></foo > +. +<p></a></foo ></p> +```````````````````````````````` + + +Illegal attributes in closing tag: + +```````````````````````````````` example +</a href="foo"> +. +<p></a href="foo"></p> +```````````````````````````````` + + +Comments: + +```````````````````````````````` example +foo <!-- this is a +comment - with hyphen --> +. +<p>foo <!-- this is a +comment - with hyphen --></p> +```````````````````````````````` + + +```````````````````````````````` example +foo <!-- not a comment -- two hyphens --> +. +<p>foo <!-- not a comment -- two hyphens --></p> +```````````````````````````````` + + +Not comments: + +```````````````````````````````` example +foo <!--> foo --> + +foo <!-- foo---> +. +<p>foo <!--> foo --></p> +<p>foo <!-- foo---></p> +```````````````````````````````` + + +Processing instructions: + +```````````````````````````````` example +foo <?php echo $a; ?> +. +<p>foo <?php echo $a; ?></p> +```````````````````````````````` + + +Declarations: + +```````````````````````````````` example +foo <!ELEMENT br EMPTY> +. +<p>foo <!ELEMENT br EMPTY></p> +```````````````````````````````` + + +CDATA sections: + +```````````````````````````````` example +foo <![CDATA[>&<]]> +. +<p>foo <![CDATA[>&<]]></p> +```````````````````````````````` + + +Entity and numeric character references are preserved in HTML +attributes: + +```````````````````````````````` example +foo <a href="ö"> +. +<p>foo <a href="ö"></p> +```````````````````````````````` + + +Backslash escapes do not work in HTML attributes: + +```````````````````````````````` example +foo <a href="\*"> +. +<p>foo <a href="\*"></p> +```````````````````````````````` + + +```````````````````````````````` example +<a href="\""> +. +<p><a href="""></p> +```````````````````````````````` + + +## Hard line breaks + +A line break (not in a code span or HTML tag) that is preceded +by two or more spaces and does not occur at the end of a block +is parsed as a [hard line break](@) (rendered +in HTML as a `<br />` tag): + +```````````````````````````````` example +foo +baz +. +<p>foo<br /> +baz</p> +```````````````````````````````` + + +For a more visible alternative, a backslash before the +[line ending] may be used instead of two spaces: + +```````````````````````````````` example +foo\ +baz +. +<p>foo<br /> +baz</p> +```````````````````````````````` + + +More than two spaces can be used: + +```````````````````````````````` example +foo +baz +. +<p>foo<br /> +baz</p> +```````````````````````````````` + + +Leading spaces at the beginning of the next line are ignored: + +```````````````````````````````` example +foo + bar +. +<p>foo<br /> +bar</p> +```````````````````````````````` + + +```````````````````````````````` example +foo\ + bar +. +<p>foo<br /> +bar</p> +```````````````````````````````` + + +Line breaks can occur inside emphasis, links, and other constructs +that allow inline content: + +```````````````````````````````` example +*foo +bar* +. +<p><em>foo<br /> +bar</em></p> +```````````````````````````````` + + +```````````````````````````````` example +*foo\ +bar* +. +<p><em>foo<br /> +bar</em></p> +```````````````````````````````` + + +Line breaks do not occur inside code spans + +```````````````````````````````` example +`code +span` +. +<p><code>code span</code></p> +```````````````````````````````` + + +```````````````````````````````` example +`code\ +span` +. +<p><code>code\ span</code></p> +```````````````````````````````` + + +or HTML tags: + +```````````````````````````````` example +<a href="foo +bar"> +. +<p><a href="foo +bar"></p> +```````````````````````````````` + + +```````````````````````````````` example +<a href="foo\ +bar"> +. +<p><a href="foo\ +bar"></p> +```````````````````````````````` + + +Hard line breaks are for separating inline content within a block. +Neither syntax for hard line breaks works at the end of a paragraph or +other block element: + +```````````````````````````````` example +foo\ +. +<p>foo\</p> +```````````````````````````````` + + +```````````````````````````````` example +foo +. +<p>foo</p> +```````````````````````````````` + + +```````````````````````````````` example +### foo\ +. +<h3>foo\</h3> +```````````````````````````````` + + +```````````````````````````````` example +### foo +. +<h3>foo</h3> +```````````````````````````````` + + +## Soft line breaks + +A regular line break (not in a code span or HTML tag) that is not +preceded by two or more spaces or a backslash is parsed as a +[softbreak](@). (A softbreak may be rendered in HTML either as a +[line ending] or as a space. The result will be the same in +browsers. In the examples here, a [line ending] will be used.) + +```````````````````````````````` example +foo +baz +. +<p>foo +baz</p> +```````````````````````````````` + + +Spaces at the end of the line and beginning of the next line are +removed: + +```````````````````````````````` example +foo + baz +. +<p>foo +baz</p> +```````````````````````````````` + + +A conforming parser may render a soft line break in HTML either as a +line break or as a space. + +A renderer may also provide an option to render soft line breaks +as hard line breaks. + +## Textual content + +Any characters not given an interpretation by the above rules will +be parsed as plain textual content. + +```````````````````````````````` example +hello $.;'there +. +<p>hello $.;'there</p> +```````````````````````````````` + + +```````````````````````````````` example +Foo χρῆν +. +<p>Foo χρῆν</p> +```````````````````````````````` + + +Internal spaces are preserved verbatim: + +```````````````````````````````` example +Multiple spaces +. +<p>Multiple spaces</p> +```````````````````````````````` + + +<!-- END TESTS --> + +# Appendix: A parsing strategy + +In this appendix we describe some features of the parsing strategy +used in the CommonMark reference implementations. + +## Overview + +Parsing has two phases: + +1. In the first phase, lines of input are consumed and the block +structure of the document---its division into paragraphs, block quotes, +list items, and so on---is constructed. Text is assigned to these +blocks but not parsed. Link reference definitions are parsed and a +map of links is constructed. + +2. In the second phase, the raw text contents of paragraphs and headings +are parsed into sequences of Markdown inline elements (strings, +code spans, links, emphasis, and so on), using the map of link +references constructed in phase 1. + +At each point in processing, the document is represented as a tree of +**blocks**. The root of the tree is a `document` block. The `document` +may have any number of other blocks as **children**. These children +may, in turn, have other blocks as children. The last child of a block +is normally considered **open**, meaning that subsequent lines of input +can alter its contents. (Blocks that are not open are **closed**.) +Here, for example, is a possible document tree, with the open blocks +marked by arrows: + +``` tree +-> document + -> block_quote + paragraph + "Lorem ipsum dolor\nsit amet." + -> list (type=bullet tight=true bullet_char=-) + list_item + paragraph + "Qui *quodsi iracundia*" + -> list_item + -> paragraph + "aliquando id" +``` + +## Phase 1: block structure + +Each line that is processed has an effect on this tree. The line is +analyzed and, depending on its contents, the document may be altered +in one or more of the following ways: + +1. One or more open blocks may be closed. +2. One or more new blocks may be created as children of the + last open block. +3. Text may be added to the last (deepest) open block remaining + on the tree. + +Once a line has been incorporated into the tree in this way, +it can be discarded, so input can be read in a stream. + +For each line, we follow this procedure: + +1. First we iterate through the open blocks, starting with the +root document, and descending through last children down to the last +open block. Each block imposes a condition that the line must satisfy +if the block is to remain open. For example, a block quote requires a +`>` character. A paragraph requires a non-blank line. +In this phase we may match all or just some of the open +blocks. But we cannot close unmatched blocks yet, because we may have a +[lazy continuation line]. + +2. Next, after consuming the continuation markers for existing +blocks, we look for new block starts (e.g. `>` for a block quote). +If we encounter a new block start, we close any blocks unmatched +in step 1 before creating the new block as a child of the last +matched block. + +3. Finally, we look at the remainder of the line (after block +markers like `>`, list markers, and indentation have been consumed). +This is text that can be incorporated into the last open +block (a paragraph, code block, heading, or raw HTML). + +Setext headings are formed when we see a line of a paragraph +that is a [setext heading underline]. + +Reference link definitions are detected when a paragraph is closed; +the accumulated text lines are parsed to see if they begin with +one or more reference link definitions. Any remainder becomes a +normal paragraph. + +We can see how this works by considering how the tree above is +generated by four lines of Markdown: + +``` markdown +> Lorem ipsum dolor +sit amet. +> - Qui *quodsi iracundia* +> - aliquando id +``` + +At the outset, our document model is just + +``` tree +-> document +``` + +The first line of our text, + +``` markdown +> Lorem ipsum dolor +``` + +causes a `block_quote` block to be created as a child of our +open `document` block, and a `paragraph` block as a child of +the `block_quote`. Then the text is added to the last open +block, the `paragraph`: + +``` tree +-> document + -> block_quote + -> paragraph + "Lorem ipsum dolor" +``` + +The next line, + +``` markdown +sit amet. +``` + +is a "lazy continuation" of the open `paragraph`, so it gets added +to the paragraph's text: + +``` tree +-> document + -> block_quote + -> paragraph + "Lorem ipsum dolor\nsit amet." +``` + +The third line, + +``` markdown +> - Qui *quodsi iracundia* +``` + +causes the `paragraph` block to be closed, and a new `list` block +opened as a child of the `block_quote`. A `list_item` is also +added as a child of the `list`, and a `paragraph` as a child of +the `list_item`. The text is then added to the new `paragraph`: + +``` tree +-> document + -> block_quote + paragraph + "Lorem ipsum dolor\nsit amet." + -> list (type=bullet tight=true bullet_char=-) + -> list_item + -> paragraph + "Qui *quodsi iracundia*" +``` + +The fourth line, + +``` markdown +> - aliquando id +``` + +causes the `list_item` (and its child the `paragraph`) to be closed, +and a new `list_item` opened up as child of the `list`. A `paragraph` +is added as a child of the new `list_item`, to contain the text. +We thus obtain the final tree: + +``` tree +-> document + -> block_quote + paragraph + "Lorem ipsum dolor\nsit amet." + -> list (type=bullet tight=true bullet_char=-) + list_item + paragraph + "Qui *quodsi iracundia*" + -> list_item + -> paragraph + "aliquando id" +``` + +## Phase 2: inline structure + +Once all of the input has been parsed, all open blocks are closed. + +We then "walk the tree," visiting every node, and parse raw +string contents of paragraphs and headings as inlines. At this +point we have seen all the link reference definitions, so we can +resolve reference links as we go. + +``` tree +document + block_quote + paragraph + str "Lorem ipsum dolor" + softbreak + str "sit amet." + list (type=bullet tight=true bullet_char=-) + list_item + paragraph + str "Qui " + emph + str "quodsi iracundia" + list_item + paragraph + str "aliquando id" +``` + +Notice how the [line ending] in the first paragraph has +been parsed as a `softbreak`, and the asterisks in the first list item +have become an `emph`. + +### An algorithm for parsing nested emphasis and links + +By far the trickiest part of inline parsing is handling emphasis, +strong emphasis, links, and images. This is done using the following +algorithm. + +When we're parsing inlines and we hit either + +- a run of `*` or `_` characters, or +- a `[` or `![` + +we insert a text node with these symbols as its literal content, and we +add a pointer to this text node to the [delimiter stack](@). + +The [delimiter stack] is a doubly linked list. Each +element contains a pointer to a text node, plus information about + +- the type of delimiter (`[`, `![`, `*`, `_`) +- the number of delimiters, +- whether the delimiter is "active" (all are active to start), and +- whether the delimiter is a potential opener, a potential closer, + or both (which depends on what sort of characters precede + and follow the delimiters). + +When we hit a `]` character, we call the *look for link or image* +procedure (see below). + +When we hit the end of the input, we call the *process emphasis* +procedure (see below), with `stack_bottom` = NULL. + +#### *look for link or image* + +Starting at the top of the delimiter stack, we look backwards +through the stack for an opening `[` or `![` delimiter. + +- If we don't find one, we return a literal text node `]`. + +- If we do find one, but it's not *active*, we remove the inactive + delimiter from the stack, and return a literal text node `]`. + +- If we find one and it's active, then we parse ahead to see if + we have an inline link/image, reference link/image, compact reference + link/image, or shortcut reference link/image. + + + If we don't, then we remove the opening delimiter from the + delimiter stack and return a literal text node `]`. + + + If we do, then + + * We return a link or image node whose children are the inlines + after the text node pointed to by the opening delimiter. + + * We run *process emphasis* on these inlines, with the `[` opener + as `stack_bottom`. + + * We remove the opening delimiter. + + * If we have a link (and not an image), we also set all + `[` delimiters before the opening delimiter to *inactive*. (This + will prevent us from getting links within links.) + +#### *process emphasis* + +Parameter `stack_bottom` sets a lower bound to how far we +descend in the [delimiter stack]. If it is NULL, we can +go all the way to the bottom. Otherwise, we stop before +visiting `stack_bottom`. + +Let `current_position` point to the element on the [delimiter stack] +just above `stack_bottom` (or the first element if `stack_bottom` +is NULL). + +We keep track of the `openers_bottom` for each delimiter +type (`*`, `_`) and each length of the closing delimiter run +(modulo 3). Initialize this to `stack_bottom`. + +Then we repeat the following until we run out of potential +closers: + +- Move `current_position` forward in the delimiter stack (if needed) + until we find the first potential closer with delimiter `*` or `_`. + (This will be the potential closer closest + to the beginning of the input -- the first one in parse order.) + +- Now, look back in the stack (staying above `stack_bottom` and + the `openers_bottom` for this delimiter type) for the + first matching potential opener ("matching" means same delimiter). + +- If one is found: + + + Figure out whether we have emphasis or strong emphasis: + if both closer and opener spans have length >= 2, we have + strong, otherwise regular. + + + Insert an emph or strong emph node accordingly, after + the text node corresponding to the opener. + + + Remove any delimiters between the opener and closer from + the delimiter stack. + + + Remove 1 (for regular emph) or 2 (for strong emph) delimiters + from the opening and closing text nodes. If they become empty + as a result, remove them and remove the corresponding element + of the delimiter stack. If the closing node is removed, reset + `current_position` to the next element in the stack. + +- If none is found: + + + Set `openers_bottom` to the element before `current_position`. + (We know that there are no openers for this kind of closer up to and + including this point, so this puts a lower bound on future searches.) + + + If the closer at `current_position` is not a potential opener, + remove it from the delimiter stack (since we know it can't + be a closer either). + + + Advance `current_position` to the next element in the stack. + +After we're done, we remove all delimiters above `stack_bottom` from the +delimiter stack. diff --git a/myst_parser/docutils_renderer.py b/myst_parser/docutils_renderer.py index 7eb8d934..797491b6 100644 --- a/myst_parser/docutils_renderer.py +++ b/myst_parser/docutils_renderer.py @@ -271,6 +271,9 @@ def render_em_open(self, token): def render_softbreak(self, token): self.current_node.append(nodes.Text("\n")) + def render_hardbreak(self, token): + self.current_node.append(nodes.raw("", "<br />\n", format="html")) + def render_strong_open(self, token): node = nodes.strong() self.add_line_and_source_path(node, token) @@ -293,6 +296,16 @@ def render_code_inline(self, token): self.add_line_and_source_path(node, token) self.current_node.append(node) + def render_code_block(self, token): + # this should never have a language, since it is just indented text, however, + # creating a literal_block with no language will raise a warning in sphinx + text = token.content + language = token.info.split()[0] if token.info else "none" + language = language or "none" + node = nodes.literal_block(text, text, language=language) + self.add_line_and_source_path(node, token) + self.current_node.append(node) + def render_fence(self, token): text = token.content language = token.info.split()[0] if token.info else "" @@ -446,24 +459,52 @@ def render_front_matter(self, token): docinfo = dict_to_docinfo(data) self.current_node.append(docinfo) - # def render_table_open(self, token): - # # print(token) - # # raise - - # table = nodes.table() - # table["classes"] += ["colwidths-auto"] - # self.add_line_and_source_path(table, token) - - # thead = nodes.thead() - # # TODO there can never be more than one header row (at least in mardown-it) - # header = token.children[0].children[0] - # for hrow in header.children: - # nodes.t - # style = hrow.attrGet("style") - - # tgroup = nodes.tgroup(cols) - # table += tgroup - # tgroup += thead + def render_table_open(self, token): + + # markdown-it table always contains two elements: + header = token.children[0] + body = token.children[1] + # with one header row + header_row = header.children[0] + + # top-level element + table = nodes.table() + table["classes"] += ["colwidths-auto"] + self.add_line_and_source_path(table, token) + self.current_node.append(table) + + # column settings element + maxcols = len(header_row.children) + colwidths = [round(100 / maxcols, 2)] * maxcols + tgroup = nodes.tgroup(cols=len(colwidths)) + table += tgroup + for colwidth in colwidths: + colspec = nodes.colspec(colwidth=colwidth) + tgroup += colspec + + # header + thead = nodes.thead() + tgroup += thead + with self.current_node_context(thead): + self.render_table_row(header_row) + + # body + tbody = nodes.tbody() + tgroup += tbody + with self.current_node_context(tbody): + for body_row in body.children: + self.render_table_row(body_row) + + def render_table_row(self, token): + row = nodes.row() + with self.current_node_context(row, append=True): + for child in token.children: + entry = nodes.entry() + style = child.attrGet("style") # i.e. the alignment when using e.g. :-- + if style: + entry["classes"].append(style) + with self.current_node_context(entry, append=True): + self.render_children(child) def render_math_inline(self, token): content = token.content @@ -471,6 +512,12 @@ def render_math_inline(self, token): self.add_line_and_source_path(node, token) self.current_node.append(node) + def render_math_single(self, token): + content = token.content + node = nodes.math(content, content) + self.add_line_and_source_path(node, token) + self.current_node.append(node) + def render_math_block(self, token): content = token.content node = nodes.math_block(content, content, nowrap=False, number=None) diff --git a/myst_parser/main.py b/myst_parser/main.py index 278c9799..657d641f 100644 --- a/myst_parser/main.py +++ b/myst_parser/main.py @@ -12,6 +12,8 @@ from myst_parser.docutils_renderer import DocutilsRenderer from myst_parser.docutils_renderer import make_document +from . import __version__ # noqa: F401 + def default_parser( renderer="sphinx", disable_syntax=(), math_delimiters="dollars" @@ -51,6 +53,7 @@ def to_docutils( options=None, env=None, document: docutils_doc = None, + renderer="sphinx", in_sphinx_env: bool = False, disable_syntax: List[str] = (), math_delimiters: str = "dollars", @@ -66,7 +69,11 @@ def to_docutils( :param disable_syntax: list of syntax element names to disable """ - md = default_parser(disable_syntax=disable_syntax, math_delimiters=math_delimiters) + md = default_parser( + renderer=renderer, + disable_syntax=disable_syntax, + math_delimiters=math_delimiters, + ) if options: md.options.update(options) md.options["document"] = document or make_document() diff --git a/myst_parser/mocking.py b/myst_parser/mocking.py index 06cda49f..3c069bae 100644 --- a/myst_parser/mocking.py +++ b/myst_parser/mocking.py @@ -114,8 +114,9 @@ def inline_text(self, text: str, lineno: int): from myst_parser.docutils_renderer import DocutilsRenderer nested_renderer = DocutilsRenderer(self._renderer.md) - options = dict( - document=self.document, current_node=paragraph, output_footnotes=False + options = {k: v for k, v in self._renderer.config.items()} + options.update( + dict(document=self.document, current_node=paragraph, output_footnotes=False) ) nested_renderer.render(tokens, options, self._renderer.env) return paragraph.children, messages diff --git a/tests/test_cli.py b/tests/test_cli.py index 4b069ca9..3643507d 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -8,4 +8,4 @@ def test_benchmark(): with tempfile.TemporaryDirectory() as tempdir: path = pathlib.Path(tempdir).joinpath("test.md") path.write_text("a b c") - assert benchmark.main(["-n", "1", "-p", "myst_parser:html", str(path)]) + assert benchmark.main(["-n", "1", "-p", "myst-parser:html", "-f", str(path)]) diff --git a/tests/test_renderers/fixtures/sphinx_directives.md b/tests/test_renderers/fixtures/sphinx_directives.md index c199a634..e856684f 100644 --- a/tests/test_renderers/fixtures/sphinx_directives.md +++ b/tests/test_renderers/fixtures/sphinx_directives.md @@ -264,7 +264,6 @@ foo -------------------------------- table (`sphinx.directives.patches.RSTTable`): -SKIP: Need to implement tables render . ```{table} diff --git a/tests/test_renderers/fixtures/syntax_elements.md b/tests/test_renderers/fixtures/syntax_elements.md index fa2c8c05..0f8a2985 100644 --- a/tests/test_renderers/fixtures/syntax_elements.md +++ b/tests/test_renderers/fixtures/syntax_elements.md @@ -275,19 +275,6 @@ $$foo$$ (abc) foo . --------------------------- -Table: -. -a|b --|- -1|2 -. -<document source="notset"> - <target ids="equation-abc"> - <math_block label="abc" math_number="1" nowrap="False" number="True" xml:space="preserve"> - foo -. - -------------------------- Sphinx Role containing backtick: . diff --git a/tests/test_renderers/fixtures/tables.md b/tests/test_renderers/fixtures/tables.md new file mode 100644 index 00000000..7b6d985a --- /dev/null +++ b/tests/test_renderers/fixtures/tables.md @@ -0,0 +1,87 @@ +-------------------------- +Simple: +. +a|b +-|- +1|2 +. +<document source="notset"> + <table classes="colwidths-auto"> + <tgroup cols="2"> + <colspec colwidth="50.0"> + <colspec colwidth="50.0"> + <thead> + <row> + <entry> + a + <entry> + b + <tbody> + <row> + <entry> + 1 + <entry> + 2 +. + +-------------------------- +Aligned: +. +a | b | c +:-|:-:| -: +1 | 2 | 3 +. +<document source="notset"> + <table classes="colwidths-auto"> + <tgroup cols="3"> + <colspec colwidth="33.33"> + <colspec colwidth="33.33"> + <colspec colwidth="33.33"> + <thead> + <row> + <entry classes="text-align:left"> + a + <entry classes="text-align:center"> + b + <entry classes="text-align:right"> + c + <tbody> + <row> + <entry classes="text-align:left"> + 1 + <entry classes="text-align:center"> + 2 + <entry classes="text-align:right"> + 3 +. + +-------------------------- +Nested syntax: +. +| *a* | __*b*__ | +| --- | -------- | +|$1$ | {sub}`x` | +. +<document source="notset"> + <table classes="colwidths-auto"> + <tgroup cols="2"> + <colspec colwidth="50.0"> + <colspec colwidth="50.0"> + <thead> + <row> + <entry> + <emphasis> + a + <entry> + <strong> + <emphasis> + b + <tbody> + <row> + <entry> + <math> + 1 + <entry> + <subscript> + x +. \ No newline at end of file diff --git a/tests/test_renderers/test_fixtures.py b/tests/test_renderers/test_fixtures.py index fd24f041..cb157442 100644 --- a/tests/test_renderers/test_fixtures.py +++ b/tests/test_renderers/test_fixtures.py @@ -20,6 +20,17 @@ def test_syntax_elements(line, title, input, expected): ) == "\n".join([l.rstrip() for l in expected.splitlines()]) +@pytest.mark.parametrize( + "line,title,input,expected", read_fixture_file(FIXTURE_PATH.joinpath("tables.md")) +) +def test_tables(line, title, input, expected): + document = to_docutils(input, in_sphinx_env=True) + print(document.pformat()) + assert "\n".join( + [l.rstrip() for l in document.pformat().splitlines()] + ) == "\n".join([l.rstrip() for l in expected.splitlines()]) + + @pytest.mark.parametrize( "line,title,input,expected", read_fixture_file(FIXTURE_PATH.joinpath("role_options.md")), diff --git a/tests/test_sphinx/sourcedirs/basic/content.md b/tests/test_sphinx/sourcedirs/basic/content.md index 7098ac81..ca4e6f9c 100644 --- a/tests/test_sphinx/sourcedirs/basic/content.md +++ b/tests/test_sphinx/sourcedirs/basic/content.md @@ -41,7 +41,7 @@ $$c=2$$ (eq:label) `` a=1{`} `` | a | b | -|-----|---| +|-----|--:| | *a* | 2 | this diff --git a/tests/test_sphinx/test_sphinx_builds/test_basic.html b/tests/test_sphinx/test_sphinx_builds/test_basic.html index e424f6b6..d2f0a87d 100644 --- a/tests/test_sphinx/test_sphinx_builds/test_basic.html +++ b/tests/test_sphinx/test_sphinx_builds/test_basic.html @@ -48,7 +48,7 @@ <h1> </p> </div> <p> - <img alt="" src="_images/example.jpg"/> + <img alt="alternative text" src="_images/example.jpg"/> </p> <p> <a class="reference external" href="https://www.google.com"> @@ -76,10 +76,19 @@ <h1> </span> </strong> </p> + <div class="math notranslate nohighlight" id="equation-eq-label"> + <span class="eqno"> + (1) + <a class="headerlink" href="#equation-eq-label" title="Permalink to this equation"> + ¶ + </a> + </span> + \[c=2\] + </div> <p> - <div class="math notranslate nohighlight"> - \[b=2\] - </div> + <a class="reference internal" href="#equation-eq-label"> + (1) + </a> </p> <p> <code class="docutils literal notranslate"> @@ -94,7 +103,7 @@ <h1> <th class="head"> a </th> - <th class="head"> + <th class="text-align:right head"> b </th> </tr> @@ -106,7 +115,7 @@ <h1> a </em> </td> - <td> + <td class="text-align:right"> 2 </td> </tr> diff --git a/tests/test_sphinx/test_sphinx_builds/test_basic.xml b/tests/test_sphinx/test_sphinx_builds/test_basic.xml index fbd00f0b..1ec5b60e 100644 --- a/tests/test_sphinx/test_sphinx_builds/test_basic.xml +++ b/tests/test_sphinx/test_sphinx_builds/test_basic.xml @@ -47,6 +47,23 @@ <paragraph> <literal> a=1{`} + <table align="default" classes="colwidths-auto"> + <tgroup cols="2"> + <colspec colwidth="50.0"> + <colspec colwidth="50.0"> + <thead> + <row> + <entry> + a + <entry classes="text-align:right"> + b + <tbody> + <row> + <entry> + <emphasis> + a + <entry classes="text-align:right"> + 2 <paragraph> this From 4a42c10b7dd304cd312f2b7ed8cda5db3f418a98 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 06:16:13 +0000 Subject: [PATCH 05/32] Update pydata-sphinx-theme requirement --- .circleci/config.yml | 1 - .readthedocs.yml | 1 - docs/requirements.txt | 1 - setup.py | 2 +- 4 files changed, 1 insertion(+), 4 deletions(-) delete mode 100644 docs/requirements.txt diff --git a/.circleci/config.yml b/.circleci/config.yml index df947633..2268c962 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -13,7 +13,6 @@ jobs: - cache-pip - run: | pip install --user -e .[sphinx,rtd] - pip install -r docs/requirements.txt - save_cache: key: cache-pip paths: diff --git a/.readthedocs.yml b/.readthedocs.yml index 1c081c9b..123a4730 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -8,4 +8,3 @@ python: extra_requirements: - sphinx - rtd - - requirements: docs/requirements.txt diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index e250c225..00000000 --- a/docs/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -git+https://github.com/pandas-dev/pandas-sphinx-theme.git@master diff --git a/setup.py b/setup.py index 798e0cec..b4ff2c77 100644 --- a/setup.py +++ b/setup.py @@ -47,7 +47,7 @@ "pytest-regressions", "beautifulsoup4", ], - "rtd": ["sphinxcontrib-bibtex", "ipython"], + "rtd": ["sphinxcontrib-bibtex", "ipython", "pydata-sphinx-theme"], }, zip_safe=True, ) From 12b0bf1d1e51a1da1ff14821f47d8e79f538c9b2 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 07:26:34 +0000 Subject: [PATCH 06/32] Add nested footnote test --- setup.py | 2 +- .../fixtures/syntax_elements.md | 44 +++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b4ff2c77..c09aa653 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ ], keywords="markdown lexer parser development docutils sphinx", python_requires=">=3.6", - install_requires=["markdown-it-py~=0.2.3"], + install_requires=["markdown-it-py~=0.3"], extras_require={ "sphinx": ["pyyaml", "docutils>=0.15", "sphinx>=2,<3"], "code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"], diff --git a/tests/test_renderers/fixtures/syntax_elements.md b/tests/test_renderers/fixtures/syntax_elements.md index 0f8a2985..cdcf56c1 100644 --- a/tests/test_renderers/fixtures/syntax_elements.md +++ b/tests/test_renderers/fixtures/syntax_elements.md @@ -467,6 +467,50 @@ Footnotes: text . +-------------------------- +Footnotes nested blocks: +. +[^a] + +[^a]: footnote*text* + + abc +xyz + + > a + + - b + + c + +finish +. +<document source="notset"> + <paragraph> + <footnote_reference auto="1" ids="id1" refname="a"> + <paragraph> + finish + <transition> + <footnote auto="1" ids="a" names="a"> + <paragraph> + footnote + <emphasis> + text + <paragraph> + abc + + xyz + <block_quote> + <paragraph> + a + <bullet_list> + <list_item> + <paragraph> + b + <paragraph> + c +. + -------------------------- Front Matter: . From 5306dc188f29ae324a42f273133e50064a7ca501 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 07:33:53 +0000 Subject: [PATCH 07/32] Move testing to GitHub --- .github/workflows/tests.yml | 62 +++++++++++++++++++++++++++++++++++++ .travis.yml | 50 ------------------------------ 2 files changed, 62 insertions(+), 50 deletions(-) create mode 100644 .github/workflows/tests.yml delete mode 100644 .travis.yml diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 00000000..6631cbd9 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,62 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Python package + +on: [push, pull_request] + # push: + # branches: [ master ] + # pull_request: + # branches: [ master ] + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + matrix: + python-version: [3.6, 3.7, 3.8] + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install .[sphinx,code_style,testing] + - name: Pre-commit checks + run: | + pre-commit run --all-files || ( git status --short ; git diff ; exit 1 ) + # cd tests/test_commonmark && ./spec.sh + - name: Test with pytest + run: | + pip install pytest + pytest --cov=myst_parser --cov-report= + - name: Upload to coveralls + run: | + pip install coveralls + coveralls + env: + COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_KEY }} + + publish: + + name: Publish to PyPi + needs: build + if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags') + runs-on: ubuntu-latest + steps: + - name: Checkout source + uses: actions/checkout@v2 + - name: Build package + run: | + pip install wheel + python setup.py sdist bdist_wheel + - name: Publish + uses: pypa/gh-action-pypi-publish@v1.1.0 + with: + user: __token__ + password: ${{ secrets.PYPI_KEY }} diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 8f5af609..00000000 --- a/.travis.yml +++ /dev/null @@ -1,50 +0,0 @@ -language: python -cache: pip -matrix: - include: - - python: 3.6 - env: TEST_TYPE="pytest" - - python: 3.7 - env: TEST_TYPE="pytest" PYPI_DEPLOY=true - - python: 3.8 - env: TEST_TYPE="pytest" - - python: 3.7 - env: TEST_TYPE="pre-commit" -install: - - pip install --upgrade pip wheel setuptools - - | - if [[ "$TEST_TYPE" == "pre-commit" ]]; then - pip install -e .[code_style] - fi - - | - if [[ "$TEST_TYPE" == "pytest" ]]; then - pip install -e .[testing,sphinx] - pip install coveralls - fi -before_script: - - (cd tests/test_commonmark && ./spec.sh) -script: -- | - if [[ "$TEST_TYPE" == "pytest" ]]; then - pytest -v --cov=myst_parser --cov-report= - fi -- | - if [[ "$TEST_TYPE" == "pre-commit" ]]; then - pre-commit run --all-files || ( git status --short ; git diff ; exit 1 ) - fi -after_success: -- | - if [[ "$TEST_TYPE" == "pytest" ]]; then - coveralls - fi - -deploy: - - provider: pypi - distributions: "sdist bdist_wheel" - user: cjsewell - password: - secure: XDaPRuCORFOvwXUfP6S5QOsFOpQmQiURGA603Rs3WkY0Y4up1uQjDWu2wUCh4cNPM5ITMXMFrRCWCFh289LM21c5pzExvx7RpsXfHol6F0hMGxjwmeU2aRltaVCa/zvevdBmgTa0aOMhOGmiLnSHxSAXYUHxkkHG4GmypsAshWNzJGSzSPBwt6YBkqXkeeJYC0S+t8tFEDjtEwn6E3Iy49L25+mI5ip8N6JRgAgR6umTCfKcCMBuyBii+/VDGKGsM4bdiDnXiDYWn0l9hzzSMr1xeiYtXB56N+6eRgm2oprRNnX4widbq3UJ5tp5/p3R7xXBi7NTz82d2vncNjk9Q3y35AyNb08Y2jJSsAw5CcWjQITvxMsHjhLibvrn/skkLgtE005ItNn4IFceH/y6HgmBn1yrA2z9bxfB5VLmsl4UqXYvn7Bd0l8C4IIbhsxtvH/yOtJ0sXQI3HTZVepJcrf8mAGfJVdnd9eqtMfXIr+6vlRqJByMK8f7drpdfWynC8bzfFrNIh/OHKoZzvEk7o6cDCj91vPcpxygecHvPDf0Tx/vdOFwEF8LTXNWVPjTHJMzurw946vpArCoAC1T8uwF+5fiu/ZcEfudKKincS6K9/2aFzgpoKOdbKP9/1h3hRjjPKc8HqTt9jziwuOl//KS0DJS3BSZZvFyaBI2p/Y= - on: - branch: master - tags: true - condition: $PYPI_DEPLOY = true From 6be709dd624b0aed166c155420040ff258f8d765 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 07:41:04 +0000 Subject: [PATCH 08/32] Add github CI badges --- README.md | 6 +++--- docs/develop/contributing.md | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index ca1e9a2b..20bf7b8c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # MyST-Parser -[![CI Status][travis-badge]][travis-link] +[![Github-CI][github-ci]][github-link] [![Coverage][coveralls-badge]][coveralls-link] [![Documentation Status][rtd-badge]][rtd-link] [![Code style: black][black-badge]][black-link] @@ -35,8 +35,8 @@ pip install -e .[sphinx,code_style,testing,rtd] To use the MyST parser in Sphinx, simply add: `extensions = ["myst_parser"]` to your `conf.py`. -[travis-badge]: https://travis-ci.org/ExecutableBookProject/MyST-Parser.svg?branch=master -[travis-link]: https://travis-ci.org/ExecutableBookProject/MyST-Parser +[github-ci]: https://github.com/ExecutableBookProject/MyST-Parser/workflows/Python%20package/badge.svg?branch=master +[github-link]: https://github.com/ExecutableBookProject/MyST-Parser [coveralls-badge]: https://coveralls.io/repos/github/ExecutableBookProject/MyST-Parser/badge.svg?branch=master [coveralls-link]: https://coveralls.io/github/ExecutableBookProject/MyST-Parser?branch=master [rtd-badge]: https://readthedocs.org/projects/myst-parser/badge/?version=latest diff --git a/docs/develop/contributing.md b/docs/develop/contributing.md index 5575046f..0e097d11 100644 --- a/docs/develop/contributing.md +++ b/docs/develop/contributing.md @@ -1,6 +1,6 @@ # Contributing -[![CI Status][travis-badge]][travis-link] +[![Github-CI][github-ci]][github-link] [![Coverage][coveralls-badge]][coveralls-link] [![CircleCI][circleci-badge]][circleci-link] [![Documentation Status][rtd-badge]][rtd-link] @@ -70,8 +70,8 @@ Merging pull requests: There are three ways of 'merging' pull requests on GitHub - Merge with merge commit: put all commits as they are on the base branch, with a merge commit on top Choose for collaborative PRs with many commits. Here, the merge commit provides actual benefits. -[travis-badge]: https://travis-ci.org/ExecutableBookProject/MyST-Parser.svg?branch=master -[travis-link]: https://travis-ci.org/ExecutableBookProject/MyST-Parser +[github-ci]: https://github.com/ExecutableBookProject/MyST-Parser/workflows/Python%20package/badge.svg?branch=master +[github-link]: https://github.com/ExecutableBookProject/MyST-Parser [coveralls-badge]: https://coveralls.io/repos/github/ExecutableBookProject/MyST-Parser/badge.svg?branch=master [coveralls-link]: https://coveralls.io/github/ExecutableBookProject/MyST-Parser?branch=master [circleci-badge]: https://circleci.com/gh/ExecutableBookProject/MyST-Parser.svg?style=shield From 22eb21809ce1eeddeecc52dfc392125e3e2230d3 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 07:43:29 +0000 Subject: [PATCH 09/32] Update conf.py --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 359543d7..dbad9387 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -50,7 +50,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = "pandas_sphinx_theme" +html_theme = "pydata_sphinx_theme" html_logo = "_static/logo.png" # Add any paths that contain custom static files (such as style sheets) here, From b8fb8081f64699e7fd2307094ec4b7f56f9ee9a7 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 13:29:00 +0000 Subject: [PATCH 10/32] Partial update of documentation --- .vscode/settings.json | 3 +- docs/api/index.md | 1 - docs/api/renderers.rst | 70 +++++++-------- docs/api/tokens.rst | 55 ------------ docs/develop/architecture.md | 11 +-- docs/develop/test_infrastructure.md | 9 +- docs/using/syntax.md | 89 ++++++++++++------- myst_parser/cli/benchmark.py | 6 ++ myst_parser/docutils_renderer.py | 1 + myst_parser/sphinx_renderer.py | 3 +- setup.py | 2 +- .../fixtures/syntax_elements.md | 21 +++++ .../test_sphinx_builds/test_basic.xml | 2 + 13 files changed, 130 insertions(+), 143 deletions(-) delete mode 100644 docs/api/tokens.rst diff --git a/.vscode/settings.json b/.vscode/settings.json index 9b88e43c..30649c14 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -16,5 +16,6 @@ "python.linting.flake8Enabled": true, "python.linting.enabled": true, "autoDocstring.customTemplatePath": "docstring.fmt.mustache", - "python.pythonPath": "/anaconda/envs/ebp/bin/python" + "python.pythonPath": "/anaconda/envs/ebp/bin/python", + "restructuredtext.confPath": "${workspaceFolder}/docs" } \ No newline at end of file diff --git a/docs/api/index.md b/docs/api/index.md index 7505f1fe..3bc97c64 100644 --- a/docs/api/index.md +++ b/docs/api/index.md @@ -5,7 +5,6 @@ ```{toctree} :maxdepth: 2 -tokens.rst directive.rst renderers.rst sphinx_parser.rst diff --git a/docs/api/renderers.rst b/docs/api/renderers.rst index 7017f0e4..0229bbb9 100644 --- a/docs/api/renderers.rst +++ b/docs/api/renderers.rst @@ -3,71 +3,69 @@ MyST Renderers -------------- -MyST-Parser builds on the mistletoe -:ref:`core renderers <mistletoe:renderers/core>` -by including the extended tokens, listed in :ref:`api/tokens`, -and adding bridges to docutils/sphinx: -HTML -.... +These renderers take the markdown-it parsed token stream and convert it to +the docutils AST. The sphinx renderer is a subclass of the docutils one, +with some additional methods only available *via* sphinx +.e.g. multi-document cross-referencing. -.. autoclass:: myst_parser.html_renderer.HTMLRenderer - :special-members: __init__, __enter__, __exit__ - :members: default_block_tokens, default_span_tokens - :undoc-members: - :member-order: alphabetical - :show-inheritance: +Docutils +........ -JSON -.... - -.. autoclass:: myst_parser.json_renderer.JsonRenderer - :special-members: __init__, __enter__, __exit__ - :members: default_block_tokens, default_span_tokens +.. autoclass:: myst_parser.docutils_renderer.DocutilsRenderer + :special-members: __output__, __init__ + :members: render, nested_render_text, add_line_and_source_path, current_node_context :undoc-members: - :member-order: alphabetical + :member-order: bysource :show-inheritance: -Docutils -........ -.. autoclass:: myst_parser.docutils_renderer.DocutilsRenderer - :special-members: __init__, __enter__, __exit__ - :members: default_block_tokens, default_span_tokens, new_document +Sphinx +...... + +.. autoclass:: myst_parser.sphinx_renderer.SphinxRenderer + :special-members: __output__ + :members: handle_cross_reference, render_math_block_eqno :undoc-members: :member-order: alphabetical :show-inheritance: +Mocking +....... + +These classes are parsed to sphinx roles and directives, +to mimic the original docutls rST specific parser elements, +but instead run nested parsing with the markdown parser. -.. autoclass:: myst_parser.docutils_renderer.MockInliner +. autoclass:: myst_parser.mocking.MockInliner :members: :undoc-members: :show-inheritance: -.. autoclass:: myst_parser.docutils_renderer.MockState +.. autoclass:: myst_parser.mocking.MockState :members: :undoc-members: :show-inheritance: -.. autoclass:: myst_parser.docutils_renderer.MockStateMachine +.. autoclass:: myst_parser.mocking.MockStateMachine :members: :undoc-members: :show-inheritance: -.. autoclass:: myst_parser.docutils_renderer.MockIncludeDirective +.. autoclass:: myst_parser.mocking.MockIncludeDirective :members: :undoc-members: :show-inheritance: -Sphinx -...... -.. autoclass:: myst_parser.docutils_renderer.SphinxRenderer - :special-members: __init__, __enter__, __exit__ - :members: default_block_tokens, default_span_tokens, mock_sphinx_env - :undoc-members: - :member-order: alphabetical - :show-inheritance: +Additional Methods +.................. + +.. autofunction:: myst_parser.docutils_renderer.make_document .. autofunction:: myst_parser.docutils_renderer.dict_to_docinfo + +.. autofunction:: myst_parser.sphinx_renderer.minimal_sphinx_app + +.. autofunction:: myst_parser.sphinx_renderer.mock_sphinx_env diff --git a/docs/api/tokens.rst b/docs/api/tokens.rst deleted file mode 100644 index dede5c83..00000000 --- a/docs/api/tokens.rst +++ /dev/null @@ -1,55 +0,0 @@ -.. _api/tokens: - -Extended AST Tokens -------------------- - -MyST builds on the mistletoe tokens, to extend the syntax: - -- :ref:`Core block tokens <mistletoe:tokens/block>` -- :ref:`Core span tokens <mistletoe:tokens/span>` -- :ref:`Extension tokens <mistletoe:tokens/extension>` - - -.. seealso:: - - :ref:`example_syntax` - - -LineComment -........... - -.. autoclass:: myst_parser.block_tokens.LineComment - :members: - :no-undoc-members: - :show-inheritance: - :exclude-members: __init__ - - -BlockBreak -.......... - -.. autoclass:: myst_parser.block_tokens.BlockBreak - :members: - :no-undoc-members: - :show-inheritance: - :exclude-members: __init__ - - -Role -.... - -.. autoclass:: myst_parser.span_tokens.Role - :members: - :no-undoc-members: - :show-inheritance: - :exclude-members: __init__ - - -Target -...... - -.. autoclass:: myst_parser.span_tokens.Target - :members: - :no-undoc-members: - :show-inheritance: - :exclude-members: __init__ diff --git a/docs/develop/architecture.md b/docs/develop/architecture.md index 35715ad2..18b35d6f 100644 --- a/docs/develop/architecture.md +++ b/docs/develop/architecture.md @@ -3,16 +3,9 @@ This page describes implementation details to help you understand the structure of the project. -```{note} -MyST currently relies on -[a fork of the Mistletoe project](https://github.com/ExecutableBookProject/mistletoe). -We hope to upstream these changes, but in the meantime make sure that you are using -this fork. -``` +## A Renderer for markdown-it tokens -## An extension to Mistletoe syntax - -At a high level, the MyST parser is an extension of the Mistletoe project. Mistletoe +At a high level, the MyST parser is an extension of th project. Markdown-It-Py is a well-structured Python parser for CommonMark text. It also defines an extension point to include more syntax in parsed files. The MyST parser uses this extension point to define its own syntax options (e.g., for Sphinx roles and directives). diff --git a/docs/develop/test_infrastructure.md b/docs/develop/test_infrastructure.md index 2956c5a0..03a4058b 100644 --- a/docs/develop/test_infrastructure.md +++ b/docs/develop/test_infrastructure.md @@ -12,11 +12,10 @@ The tests are run using [pytest](https://docs.pytest.org)/[TravisCI](https://tra The tests are ordered in a hierarchical fashion: -1. In `tests/test_syntax` are tests that check that the source text is being correctly converted to the Markdown ([mistletoe](https://github.com/miyuchina/mistletoe)) AST. -2. In `tests/test_commonmark` the [CommonMark](https://github.com/commonmark/CommonMark.git) test set is run; to check that the parser is complying with the CommonMark specification. -3. In `tests/test_renderers` are tests that check that the Markdown AST is being correctly converted to the docutils/sphinx AST. This includes testing that roles and directives are correctly parsed and run. -4. In `tests/test_sphinx` are tests that check that minimal sphinx project builds are running correctly, to convert MyST markdown files to HTML. -5. In `.circleci` the package documentation (written in MyST format) is built and tested for build errors/warnings. +1. In `tests/test_commonmark` the [CommonMark](https://github.com/commonmark/CommonMark.git) test set is run; to check that the parser is complying with the CommonMark specification. +2. In `tests/test_renderers` are tests that check that the Markdown AST is being correctly converted to the docutils/sphinx AST. This includes testing that roles and directives are correctly parsed and run. +3. In `tests/test_sphinx` are tests that check that minimal sphinx project builds are running correctly, to convert MyST markdown files to HTML. +4. In `.circleci` the package documentation (written in MyST format) is built and tested for build errors/warnings. ## Test tools diff --git a/docs/using/syntax.md b/docs/using/syntax.md index 2a473780..5e6dd7f0 100644 --- a/docs/using/syntax.md +++ b/docs/using/syntax.md @@ -3,7 +3,7 @@ # The MyST Syntax Guide As a base, MyST adheres to the [CommonMark specification](https://spec.commonmark.org/). -For this, it uses the {ref}`mistletoe:intro/top-level` parser, +For this, it uses the [markdown-it-py](https://github.com/ExecutableBookProject/markdown-it-py) parser, which is a well-structured markdown parser for Python that is CommonMark-compliant and also extensible. @@ -18,24 +18,16 @@ and extensibility of Sphinx with the simplicity and readability of Markdown. Below is a summary of the syntax 'tokens' parsed, and further details of a few major extensions from the CommonMark flavor of markdown. -```{seealso} -{ref}`MyST Extended AST Tokens API <api/tokens>` -``` - -## Parsed Token Classes +% ```{seealso} +% {ref}`MyST Extended AST Tokens API <api/tokens>` +% ``` -MyST builds on the tokens defined by mistletoe, to extend the syntax: +## Parsed Token -- {ref}`Core block tokens <mistletoe:tokens/block>` -- {ref}`Core span tokens <mistletoe:tokens/span>` -- {ref}`Extension tokens <mistletoe:tokens/extension>` +MyST builds on the tokens defined by markdown-it, to extend the syntax +described in the [CommonMark Spec](https://spec.commonmark.org/0.29/), which the parser is tested against. -Tokens are listed in their order of precedence. -For more information, also see the [CommonMark Spec](https://spec.commonmark.org/0.28/), which the parser is tested against. - -```{seealso} -{ref}`Token API <api/tokens>` -``` +% TODO link to markdown-it documentation ### Block Tokens @@ -587,6 +579,22 @@ This is equivalent to the following directive: ``` ```` +You can also add labels to block equations: + +```latex +$$ +e = mc^2 +$$ (eqn:best) + +This is the best equation {eq}`eqn:best` +``` + +$$ +e = mc^2 +$$ (eqn:best) + +This is the best equation {eq}`eqn:best` + (syntax/frontmatter)= ### Front Matter @@ -727,6 +735,37 @@ This is a footnote reference.[^myref] [^myref]: This **is** the footnote definition. +Any preceding text after a footnote definitions, which is +indented by four or more spaces, will also be included in the footnote definition, e.g. + +```md +A longer footnote definition.[^mylongdef] + +[^mylongdef]: This is the footnote definition. + + That continues for all indented lines + + - even other block elements + + Plus any preceding unindented lines, +that are not separated by a blank line + +This is not part of the footnote. +``` + +A longer footnote definition.[^mylongdef] + +[^mylongdef]: This is the footnote definition. + + That continues for all indented lines + + - even other block elements + + Plus any preceding unindented lines, +that are not separated by a blank line + +This is not part of the footnote. + ````{important} Although footnote references can be used just fine within directives, e.g.[^myref], it it recommended that footnote definitions are not set within directives, @@ -745,21 +784,3 @@ unless they will only be referenced within that same directive: This is because, in the current implementation, they may not be available to reference in text above that particular directive. ```` - -````{note} -Currently, footnote definitions may only be on a single line. -However, it is intended in an update to come, that any preceding text which is -indented by four or more spaces, will also be included in the footnote definition, e.g. - -```md -[^myref]: This is the footnote definition. - - That continues for all indented lines - - Plus any precding unindented lines, -that are not separated by a blank line - -This is not part of the footnote. -``` - -```` diff --git a/myst_parser/cli/benchmark.py b/myst_parser/cli/benchmark.py index b55ef5ea..3cf5d075 100644 --- a/myst_parser/cli/benchmark.py +++ b/myst_parser/cli/benchmark.py @@ -5,6 +5,7 @@ from time import perf_counter ALL_PACKAGES = ( + # "panflute", "python-markdown:extra", "commonmark.py", "mistletoe", @@ -57,6 +58,11 @@ def run_mistletoe(package, text): return package.markdown(text) +@benchmark("panflute") +def run_panflute(package, text): + return package.convert_text(text, input_format="markdown", output_format="html") + + @benchmark("markdown_it") def run_markdown_it_py(package, text): md = package.MarkdownIt("commonmark") diff --git a/myst_parser/docutils_renderer.py b/myst_parser/docutils_renderer.py index 797491b6..208f2a03 100644 --- a/myst_parser/docutils_renderer.py +++ b/myst_parser/docutils_renderer.py @@ -44,6 +44,7 @@ class DocutilsRenderer: __output__ = "docutils" def __init__(self, parser: MarkdownIt): + """Load the renderer (called by ``MarkdownIt``)""" self.md = parser self.rules = { k: v diff --git a/myst_parser/sphinx_renderer.py b/myst_parser/sphinx_renderer.py index 302bd18e..d5e80b40 100644 --- a/myst_parser/sphinx_renderer.py +++ b/myst_parser/sphinx_renderer.py @@ -27,7 +27,7 @@ class SphinxRenderer(DocutilsRenderer): """ def handle_cross_reference(self, token, destination): - + """Create nodes for references that are not immediately resolvable.""" wrap_node = addnodes.pending_xref( reftarget=unquote(destination), reftype="any", @@ -46,6 +46,7 @@ def handle_cross_reference(self, token, destination): self.render_children(token) def render_math_block_eqno(self, token): + """Render math with referencable labels, e.g. ``$a=1$ (label)``.""" label = token.info content = token.content node = nodes.math_block( diff --git a/setup.py b/setup.py index c09aa653..836f7947 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ ], keywords="markdown lexer parser development docutils sphinx", python_requires=">=3.6", - install_requires=["markdown-it-py~=0.3"], + install_requires=["markdown-it-py~=0.3.2"], extras_require={ "sphinx": ["pyyaml", "docutils>=0.15", "sphinx>=2,<3"], "code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"], diff --git a/tests/test_renderers/fixtures/syntax_elements.md b/tests/test_renderers/fixtures/syntax_elements.md index cdcf56c1..bdad8be3 100644 --- a/tests/test_renderers/fixtures/syntax_elements.md +++ b/tests/test_renderers/fixtures/syntax_elements.md @@ -275,6 +275,27 @@ $$foo$$ (abc) foo . +-------------------------- +Math Block multiple: +. +$$ +a = 1 +$$ + +$$ +b = 2 +$$ (a) +. +<document source="notset"> + <math_block nowrap="False" number="True" xml:space="preserve"> + + a = 1 + <target ids="equation-a"> + <math_block docname="mock_docname" label="a" nowrap="False" number="1" xml:space="preserve"> + + b = 2 +. + -------------------------- Sphinx Role containing backtick: . diff --git a/tests/test_sphinx/test_sphinx_builds/test_basic.xml b/tests/test_sphinx/test_sphinx_builds/test_basic.xml index 1ec5b60e..8665e540 100644 --- a/tests/test_sphinx/test_sphinx_builds/test_basic.xml +++ b/tests/test_sphinx/test_sphinx_builds/test_basic.xml @@ -37,6 +37,8 @@ <strong> <math> a=1 + <math_block nowrap="False" number="True" xml:space="preserve"> + b=2 <target refid="equation-eq-label"> <math_block docname="content" ids="equation-eq-label" label="eq:label" nowrap="False" number="1" xml:space="preserve"> c=2 From 615d052e046195c9427bab3e6ba06cb892fd57c3 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 13:33:44 +0000 Subject: [PATCH 11/32] test fix --- tests/test_sphinx/test_sphinx_builds/test_basic.html | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_sphinx/test_sphinx_builds/test_basic.html b/tests/test_sphinx/test_sphinx_builds/test_basic.html index d2f0a87d..7060d268 100644 --- a/tests/test_sphinx/test_sphinx_builds/test_basic.html +++ b/tests/test_sphinx/test_sphinx_builds/test_basic.html @@ -76,6 +76,9 @@ <h1> </span> </strong> </p> + <div class="math notranslate nohighlight"> + \[b=2\] + </div> <div class="math notranslate nohighlight" id="equation-eq-label"> <span class="eqno"> (1) From 7d98dbf4adafe7dfa507b0f8be96c2242a0cd8cd Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 13:38:39 +0000 Subject: [PATCH 12/32] fix docs --- docs/api/sphinx_parser.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/api/sphinx_parser.rst b/docs/api/sphinx_parser.rst index 2deb9a8e..f7334b3e 100644 --- a/docs/api/sphinx_parser.rst +++ b/docs/api/sphinx_parser.rst @@ -3,7 +3,7 @@ Sphinx Parser ------------- -This class builds on the :py:class:`~myst_parser.docutils_renderer.SphinxRenderer` +This class builds on the :py:class:`~myst_parser.sphinx_renderer.SphinxRenderer` to generate a parser for Sphinx, using the :ref:`Sphinx parser API <sphinx:parser-api>`: .. autoclass:: myst_parser.sphinx_parser.MystParser From 0a1d53e1800fb29017ee75d09483345626f80b23 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 15:29:42 +0000 Subject: [PATCH 13/32] Update documentation --- docs/develop/test_infrastructure.md | 2 +- docs/using/benchmark.md | 997 ---------------------------- docs/using/index.md | 4 +- docs/using/install.md | 36 - docs/using/intro.md | 72 ++ docs/using/sphinx.md | 7 - docs/using/use_api.md | 372 +++++------ myst_parser/main.py | 34 +- myst_parser/sphinx_renderer.py | 4 +- 9 files changed, 273 insertions(+), 1255 deletions(-) delete mode 100644 docs/using/benchmark.md delete mode 100644 docs/using/install.md create mode 100644 docs/using/intro.md delete mode 100644 docs/using/sphinx.md diff --git a/docs/develop/test_infrastructure.md b/docs/develop/test_infrastructure.md index 03a4058b..536ecbd0 100644 --- a/docs/develop/test_infrastructure.md +++ b/docs/develop/test_infrastructure.md @@ -8,7 +8,7 @@ manner: > **Write failing tests that the code should pass, then write code to pass the tests**. -The tests are run using [pytest](https://docs.pytest.org)/[TravisCI](https://travis-ci.org) for unit tests, and [sphinx-build](https://www.sphinx-doc.org/en/master/man/sphinx-build.html)/[CircleCI](https://circleci.com) for documentation build tests. +The tests are run using [pytest](https://docs.pytest.org)/[GitHub Actions](https://github.com/features/actions) for unit tests, and [sphinx-build](https://www.sphinx-doc.org/en/master/man/sphinx-build.html)/[CircleCI](https://circleci.com) for documentation build tests. The tests are ordered in a hierarchical fashion: diff --git a/docs/using/benchmark.md b/docs/using/benchmark.md deleted file mode 100644 index 77e97de0..00000000 --- a/docs/using/benchmark.md +++ /dev/null @@ -1,997 +0,0 @@ -Parsing Performance Benchmark -============================= - -The following document describes Markdown syntax, [as written by John Gruber][src]. -It is used to benchmark the parsing speed of the MyST-Parser against -some existing Markdown parsers written in Python: - - [src]: https://daringfireball.net/projects/markdown/syntax - - $ myst-benchmark docs/using/benchmark.md - Test document: benchmark.md - Test iterations: 1000 - Running tests ... - ================= - python-markdown:extra (3.2): 53.78 s - mistune (0.8.4): 15.22 s - commonmark.py (0.9.1): 61.92 s - mistletoe (0.8.0): 43.51 s - myst_parser:html (0.2.0): 52.47 s - myst_parser:docutils (0.2.0): 65.27 s - myst_parser:sphinx (0.2.0): 73.54 s - -As already noted by [mistletoe](https://github.com/miyuchina/mistletoe#performance) -(which this package is built on), although Mistune is the fastest, -this is because it does not strictly follow the CommonMark spec, -which outlines a highly context-sensitive grammar for Markdown. -The simpler approach taken by Mistune means that it cannot handle more -complex parsing cases, such as precedence of different types of tokens, escaping rules, etc. - -The MyST parser is slightly slower than the base mistletoe parser, due to the additional -syntax which it parses. Then the conversion to docutils AST takes some more time, -but is still comparably performant to the core CommonMark.py parser. The sphinx -parse takes some extra time, due to loading the full sphinx environment, -including its roles and directives. - -## Contents - -* [Overview](#overview) - * [Philosophy](#philosophy) - * [Inline HTML](#html) - * [Automatic Escaping for Special Characters](#autoescape) -* [Block Elements](#block) - * [Paragraphs and Line Breaks](#p) - * [Headers](#header) - * [Blockquotes](#blockquote) - * [Lists](#list) - * [Code Blocks](#precode) - * [Horizontal Rules](#hr) -* [Span Elements](#span) - * [Links](#link) - * [Emphasis](#em) - * [Code](#code) - * [Images](#img) -* [Miscellaneous](#misc) - * [Backslash Escapes](#backslash) - * [Automatic Links](#autolink) - -* * * - -<h2 id="overview">Overview</h2> - -<h3 id="philosophy">Philosophy</h3> - -Markdown is intended to be as easy-to-read and easy-to-write as is feasible. - -Readability, however, is emphasized above all else. A Markdown-formatted -document should be publishable as-is, as plain text, without looking -like it's been marked up with tags or formatting instructions. While -Markdown's syntax has been influenced by several existing text-to-HTML -filters -- including [Setext] [1], [atx] [2], [Textile] [3], [reStructuredText] [4], -[Grutatext] [5], and [EtText] [6] -- the single biggest source of -inspiration for Markdown's syntax is the format of plain text email. - - [1]: http://docutils.sourceforge.net/mirror/setext.html - [2]: http://www.aaronsw.com/2002/atx/ - [3]: http://textism.com/tools/textile/ - [4]: http://docutils.sourceforge.net/rst.html - [5]: http://www.triptico.com/software/grutatxt.html - [6]: http://ettext.taint.org/doc/ - -To this end, Markdown's syntax is comprised entirely of punctuation -characters, which punctuation characters have been carefully chosen so -as to look like what they mean. E.g., asterisks around a word actually -look like \*emphasis\*. Markdown lists look like, well, lists. Even -blockquotes look like quoted passages of text, assuming you've ever -used email. - -<h3 id="html">Inline HTML</h3> - -Markdown's syntax is intended for one purpose: to be used as a -format for *writing* for the web. - -Markdown is not a replacement for HTML, or even close to it. Its -syntax is very small, corresponding only to a very small subset of -HTML tags. The idea is *not* to create a syntax that makes it easier -to insert HTML tags. In my opinion, HTML tags are already easy to -insert. The idea for Markdown is to make it easy to read, write, and -edit prose. HTML is a *publishing* format; Markdown is a *writing* -format. Thus, Markdown's formatting syntax only addresses issues that -can be conveyed in plain text. - -For any markup that is not covered by Markdown's syntax, you simply -use HTML itself. There's no need to preface it or delimit it to -indicate that you're switching from Markdown to HTML; you just use -the tags. - -The only restrictions are that block-level HTML elements -- e.g. `<div>`, -`<table>`, `<pre>`, `<p>`, etc. -- must be separated from surrounding -content by blank lines, and the start and end tags of the block should -not be indented with tabs or spaces. Markdown is smart enough not -to add extra (unwanted) `<p>` tags around HTML block-level tags. - -For example, to add an HTML table to a Markdown article: - - This is a regular paragraph. - - <table> - <tr> - <td>Foo</td> - </tr> - </table> - - This is another regular paragraph. - -Note that Markdown formatting syntax is not processed within block-level -HTML tags. E.g., you can't use Markdown-style `*emphasis*` inside an -HTML block. - -Span-level HTML tags -- e.g. `<span>`, `<cite>`, or `<del>` -- can be -used anywhere in a Markdown paragraph, list item, or header. If you -want, you can even use HTML tags instead of Markdown formatting; e.g. if -you'd prefer to use HTML `<a>` or `<img>` tags instead of Markdown's -link or image syntax, go right ahead. - -Unlike block-level HTML tags, Markdown syntax *is* processed within -span-level tags. - - -<h3 id="autoescape">Automatic Escaping for Special Characters</h3> - -In HTML, there are two characters that demand special treatment: `<` -and `&`. Left angle brackets are used to start tags; ampersands are -used to denote HTML entities. If you want to use them as literal -characters, you must escape them as entities, e.g. `<`, and -`&`. - -Ampersands in particular are bedeviling for web writers. If you want to -write about 'AT&T', you need to write '`AT&T`'. You even need to -escape ampersands within URLs. Thus, if you want to link to: - - http://images.google.com/images?num=30&q=larry+bird - -you need to encode the URL as: - - http://images.google.com/images?num=30&q=larry+bird - -in your anchor tag `href` attribute. Needless to say, this is easy to -forget, and is probably the single most common source of HTML validation -errors in otherwise well-marked-up web sites. - -Markdown allows you to use these characters naturally, taking care of -all the necessary escaping for you. If you use an ampersand as part of -an HTML entity, it remains unchanged; otherwise it will be translated -into `&`. - -So, if you want to include a copyright symbol in your article, you can write: - - © - -and Markdown will leave it alone. But if you write: - - AT&T - -Markdown will translate it to: - - AT&T - -Similarly, because Markdown supports [inline HTML](#html), if you use -angle brackets as delimiters for HTML tags, Markdown will treat them as -such. But if you write: - - 4 < 5 - -Markdown will translate it to: - - 4 < 5 - -However, inside Markdown code spans and blocks, angle brackets and -ampersands are *always* encoded automatically. This makes it easy to use -Markdown to write about HTML code. (As opposed to raw HTML, which is a -terrible format for writing about HTML syntax, because every single `<` -and `&` in your example code needs to be escaped.) - - -* * * - - -<h2 id="block">Block Elements</h2> - - -<h3 id="p">Paragraphs and Line Breaks</h3> - -A paragraph is simply one or more consecutive lines of text, separated -by one or more blank lines. (A blank line is any line that looks like a -blank line -- a line containing nothing but spaces or tabs is considered -blank.) Normal paragraphs should not be indented with spaces or tabs. - -The implication of the "one or more consecutive lines of text" rule is -that Markdown supports "hard-wrapped" text paragraphs. This differs -significantly from most other text-to-HTML formatters (including Movable -Type's "Convert Line Breaks" option) which translate every line break -character in a paragraph into a `<br />` tag. - -When you *do* want to insert a `<br />` break tag using Markdown, you -end a line with two or more spaces, then type return. - -Yes, this takes a tad more effort to create a `<br />`, but a simplistic -"every line break is a `<br />`" rule wouldn't work for Markdown. -Markdown's email-style [blockquoting][bq] and multi-paragraph [list items][l] -work best -- and look better -- when you format them with hard breaks. - - [bq]: #blockquote - [l]: #list - - - -<h3 id="header">Headers</h3> - -Markdown supports two styles of headers, [Setext] [1] and [atx] [2]. - -Setext-style headers are "underlined" using equal signs (for first-level -headers) and dashes (for second-level headers). For example: - - This is an H1 - ============= - - This is an H2 - ------------- - -This is an H2 -------------- - -Any number of underlining `=`'s or `-`'s will work. - -Atx-style headers use 1-6 hash characters at the start of the line, -corresponding to header levels 1-6. For example: - - # This is an H1 - - ## This is an H2 - - ###### This is an H6 - -Optionally, you may "close" atx-style headers. This is purely -cosmetic -- you can use this if you think it looks better. The -closing hashes don't even need to match the number of hashes -used to open the header. (The number of opening hashes -determines the header level.) : - - # This is an H1 # - - ## This is an H2 ## - - ### This is an H3 ###### - - -<h3 id="blockquote">Blockquotes</h3> - -Markdown uses email-style `>` characters for blockquoting. If you're -familiar with quoting passages of text in an email message, then you -know how to create a blockquote in Markdown. It looks best if you hard -wrap the text and put a `>` before every line: - - > This is a blockquote with two paragraphs. Lorem ipsum dolor sit amet, - > consectetuer adipiscing elit. Aliquam hendrerit mi posuere lectus. - > Vestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus. - > - > Donec sit amet nisl. Aliquam semper ipsum sit amet velit. Suspendisse - > id sem consectetuer libero luctus adipiscing. - -* * * - -> This is a blockquote with two paragraphs. Lorem ipsum dolor sit amet, -> consectetuer adipiscing elit. Aliquam hendrerit mi posuere lectus. -> Vestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus. -> -> Donec sit amet nisl. Aliquam semper ipsum sit amet velit. Suspendisse -> id sem consectetuer libero luctus adipiscing. - -* * * - -Markdown allows you to be lazy and only put the `>` before the first -line of a hard-wrapped paragraph: - - > This is a blockquote with two paragraphs. Lorem ipsum dolor sit amet, - consectetuer adipiscing elit. Aliquam hendrerit mi posuere lectus. - Vestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus. - - > Donec sit amet nisl. Aliquam semper ipsum sit amet velit. Suspendisse - id sem consectetuer libero luctus adipiscing. - -* * * - -> This is a blockquote with two paragraphs. Lorem ipsum dolor sit amet, -consectetuer adipiscing elit. Aliquam hendrerit mi posuere lectus. -Vestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus. - -> Donec sit amet nisl. Aliquam semper ipsum sit amet velit. Suspendisse -id sem consectetuer libero luctus adipiscing. - -* * * - -Blockquotes can be nested (i.e. a blockquote-in-a-blockquote) by -adding additional levels of `>`: - - > This is the first level of quoting. - > - > > This is nested blockquote. - > - > Back to the first level. - -* * * - -> This is the first level of quoting. -> -> > This is nested blockquote. -> -> Back to the first level. - -* * * - -Blockquotes can contain other Markdown elements, including headers, lists, -and code blocks: - - > ## This is a header. - > - > 1. This is the first list item. - > 2. This is the second list item. - > - > Here's some example code: - > - > return shell_exec("echo $input | $markdown_script"); - -Any decent text editor should make email-style quoting easy. For -example, with BBEdit, you can make a selection and choose Increase -Quote Level from the Text menu. - - -<h3 id="list">Lists</h3> - -Markdown supports ordered (numbered) and unordered (bulleted) lists. - -Unordered lists use asterisks, pluses, and hyphens -- interchangably --- as list markers: - - * Red - * Green - * Blue - -* * * - -* Red -* Green -* Blue - -* * * - -is equivalent to: - - + Red - + Green - + Blue - -* * * - -+ Red -+ Green -+ Blue - -* * * - -and: - - - Red - - Green - - Blue - -* * * - -- Red -- Green -- Blue - -* * * - -Ordered lists use numbers followed by periods: - - 1. Bird - 2. McHale - 3. Parish - -* * * - -1. Bird -2. McHale -3. Parish - -* * * - -It's important to note that the actual numbers you use to mark the -list have no effect on the HTML output Markdown produces. The HTML -Markdown produces from the above list is: - - <ol> - <li>Bird</li> - <li>McHale</li> - <li>Parish</li> - </ol> - -If you instead wrote the list in Markdown like this: - - 1. Bird - 2. McHale - 3. Parish - -or even: - - 3. Bird - 1. McHale - 8. Parish - -* * * - -1. Bird -2. McHale -3. Parish - -* * * - -you'd get the exact same HTML output. The point is, if you want to, -you can use ordinal numbers in your ordered Markdown lists, so that -the numbers in your source match the numbers in your published HTML. -But if you want to be lazy, you don't have to. - -If you do use lazy list numbering, however, you should still start the -list with the number 1. At some point in the future, Markdown may support -starting ordered lists at an arbitrary number. - -List markers typically start at the left margin, but may be indented by -up to three spaces. List markers must be followed by one or more spaces -or a tab. - -To make lists look nice, you can wrap items with hanging indents: - - * Lorem ipsum dolor sit amet, consectetuer adipiscing elit. - Aliquam hendrerit mi posuere lectus. Vestibulum enim wisi, - viverra nec, fringilla in, laoreet vitae, risus. - * Donec sit amet nisl. Aliquam semper ipsum sit amet velit. - Suspendisse id sem consectetuer libero luctus adipiscing. - -But if you want to be lazy, you don't have to: - - * Lorem ipsum dolor sit amet, consectetuer adipiscing elit. - Aliquam hendrerit mi posuere lectus. Vestibulum enim wisi, - viverra nec, fringilla in, laoreet vitae, risus. - * Donec sit amet nisl. Aliquam semper ipsum sit amet velit. - Suspendisse id sem consectetuer libero luctus adipiscing. - -If list items are separated by blank lines, Markdown will wrap the -items in `<p>` tags in the HTML output. For example, this input: - - * Bird - * Magic - -will turn into: - - <ul> - <li>Bird</li> - <li>Magic</li> - </ul> - -But this: - - * Bird - - * Magic - -will turn into: - - <ul> - <li><p>Bird</p></li> - <li><p>Magic</p></li> - </ul> - -List items may consist of multiple paragraphs. Each subsequent -paragraph in a list item must be indented by either 4 spaces -or one tab: - - 1. This is a list item with two paragraphs. Lorem ipsum dolor - sit amet, consectetuer adipiscing elit. Aliquam hendrerit - mi posuere lectus. - - Vestibulum enim wisi, viverra nec, fringilla in, laoreet - vitae, risus. Donec sit amet nisl. Aliquam semper ipsum - sit amet velit. - - 2. Suspendisse id sem consectetuer libero luctus adipiscing. - -It looks nice if you indent every line of the subsequent -paragraphs, but here again, Markdown will allow you to be -lazy: - - * This is a list item with two paragraphs. - - This is the second paragraph in the list item. You're - only required to indent the first line. Lorem ipsum dolor - sit amet, consectetuer adipiscing elit. - - * Another item in the same list. - -To put a blockquote within a list item, the blockquote's `>` -delimiters need to be indented: - - * A list item with a blockquote: - - > This is a blockquote - > inside a list item. - -* * * - -* A list item with a blockquote: - - > This is a blockquote - > inside a list item. - -* * * - -To put a code block within a list item, the code block needs -to be indented *twice* -- 8 spaces or two tabs: - - * A list item with a code block: - - <code goes here> - - -It's worth noting that it's possible to trigger an ordered list by -accident, by writing something like this: - - 1. What a great season. - -In other words, a *number-period-space* sequence at the beginning of a -line. To avoid this, you can backslash-escape the period: - - 1986\. What a great season. - - - -<h3 id="precode">Code Blocks</h3> - -Pre-formatted code blocks are used for writing about programming or -markup source code. Rather than forming normal paragraphs, the lines -of a code block are interpreted literally. Markdown wraps a code block -in both `<pre>` and `<code>` tags. - -To produce a code block in Markdown, simply indent every line of the -block by at least 4 spaces or 1 tab. For example, given this input: - - This is a normal paragraph: - - This is a code block. - -Markdown will generate: - - <p>This is a normal paragraph:</p> - - <pre><code>This is a code block. - </code></pre> - -One level of indentation -- 4 spaces or 1 tab -- is removed from each -line of the code block. For example, this: - - Here is an example of AppleScript: - - tell application "Foo" - beep - end tell - -will turn into: - - <p>Here is an example of AppleScript:</p> - - <pre><code>tell application "Foo" - beep - end tell - </code></pre> - -A code block continues until it reaches a line that is not indented -(or the end of the article). - -Within a code block, ampersands (`&`) and angle brackets (`<` and `>`) -are automatically converted into HTML entities. This makes it very -easy to include example HTML source code using Markdown -- just paste -it and indent it, and Markdown will handle the hassle of encoding the -ampersands and angle brackets. For example, this: - - <div class="footer"> - © 2004 Foo Corporation - </div> - -will turn into: - - <pre><code><div class="footer"> - &copy; 2004 Foo Corporation - </div> - </code></pre> - -Regular Markdown syntax is not processed within code blocks. E.g., -asterisks are just literal asterisks within a code block. This means -it's also easy to use Markdown to write about Markdown's own syntax. - - - -<h3 id="hr">Horizontal Rules</h3> - -You can produce a horizontal rule tag (`<hr />`) by placing three or -more hyphens, asterisks, or underscores on a line by themselves. If you -wish, you may use spaces between the hyphens or asterisks. Each of the -following lines will produce a horizontal rule: - - * * * - - *** - - ***** - - - - - - - --------------------------------------- - - -* * * - -<h2 id="span">Span Elements</h2> - -<h3 id="link">Links</h3> - -Markdown supports two style of links: *inline* and *reference*. - -In both styles, the link text is delimited by [square brackets]. - -To create an inline link, use a set of regular parentheses immediately -after the link text's closing square bracket. Inside the parentheses, -put the URL where you want the link to point, along with an *optional* -title for the link, surrounded in quotes. For example: - - This is [an example](http://example.com/ "Title") inline link. - - [This link](http://example.net/) has no title attribute. - -Will produce: - - <p>This is <a href="http://example.com/" title="Title"> - an example</a> inline link.</p> - - <p><a href="http://example.net/">This link</a> has no - title attribute.</p> - -If you're referring to a local resource on the same server, you can -use relative paths: - - See my [About](/about/) page for details. - -Reference-style links use a second set of square brackets, inside -which you place a label of your choosing to identify the link: - - This is [an example][id] reference-style link. - -You can optionally use a space to separate the sets of brackets: - - This is [an example] [id] reference-style link. - -Then, anywhere in the document, you define your link label like this, -on a line by itself: - - [id]: http://example.com/ "Optional Title Here" - -That is: - -* Square brackets containing the link identifier (optionally - indented from the left margin using up to three spaces); -* followed by a colon; -* followed by one or more spaces (or tabs); -* followed by the URL for the link; -* optionally followed by a title attribute for the link, enclosed - in double or single quotes, or enclosed in parentheses. - -The following three link definitions are equivalent: - - [foo]: http://example.com/ "Optional Title Here" - [foo]: http://example.com/ 'Optional Title Here' - [foo]: http://example.com/ (Optional Title Here) - -**Note:** There is a known bug in Markdown.pl 1.0.1 which prevents -single quotes from being used to delimit link titles. - -The link URL may, optionally, be surrounded by angle brackets: - - [id]: <http://example.com/> "Optional Title Here" - -You can put the title attribute on the next line and use extra spaces -or tabs for padding, which tends to look better with longer URLs: - - [id]: http://example.com/longish/path/to/resource/here - "Optional Title Here" - -Link definitions are only used for creating links during Markdown -processing, and are stripped from your document in the HTML output. - -Link definition names may consist of letters, numbers, spaces, and -punctuation -- but they are *not* case sensitive. E.g. these two -links: - - [link text][a] - [link text][A] - -are equivalent. - -The *implicit link name* shortcut allows you to omit the name of the -link, in which case the link text itself is used as the name. -Just use an empty set of square brackets -- e.g., to link the word -"Google" to the google.com web site, you could simply write: - - [Google][] - -And then define the link: - - [Google]: http://google.com/ - -Because link names may contain spaces, this shortcut even works for -multiple words in the link text: - - Visit [Daring Fireball][] for more information. - -And then define the link: - - [Daring Fireball]: http://daringfireball.net/ - -Link definitions can be placed anywhere in your Markdown document. I -tend to put them immediately after each paragraph in which they're -used, but if you want, you can put them all at the end of your -document, sort of like footnotes. - -Here's an example of reference links in action: - - I get 10 times more traffic from [Google] [1] than from - [Yahoo] [2] or [MSN] [3]. - - [1]: http://google.com/ "Google" - [2]: http://search.yahoo.com/ "Yahoo Search" - [3]: http://search.msn.com/ "MSN Search" - -Using the implicit link name shortcut, you could instead write: - - I get 10 times more traffic from [Google][] than from - [Yahoo][] or [MSN][]. - - [google]: http://google.com/ "Google" - [yahoo]: http://search.yahoo.com/ "Yahoo Search" - [msn]: http://search.msn.com/ "MSN Search" - -Both of the above examples will produce the following HTML output: - - <p>I get 10 times more traffic from <a href="http://google.com/" - title="Google">Google</a> than from - <a href="http://search.yahoo.com/" title="Yahoo Search">Yahoo</a> - or <a href="http://search.msn.com/" title="MSN Search">MSN</a>.</p> - -For comparison, here is the same paragraph written using -Markdown's inline link style: - - I get 10 times more traffic from [Google](http://google.com/ "Google") - than from [Yahoo](http://search.yahoo.com/ "Yahoo Search") or - [MSN](http://search.msn.com/ "MSN Search"). - -The point of reference-style links is not that they're easier to -write. The point is that with reference-style links, your document -source is vastly more readable. Compare the above examples: using -reference-style links, the paragraph itself is only 81 characters -long; with inline-style links, it's 176 characters; and as raw HTML, -it's 234 characters. In the raw HTML, there's more markup than there -is text. - -With Markdown's reference-style links, a source document much more -closely resembles the final output, as rendered in a browser. By -allowing you to move the markup-related metadata out of the paragraph, -you can add links without interrupting the narrative flow of your -prose. - - -<h3 id="em">Emphasis</h3> - -Markdown treats asterisks (`*`) and underscores (`_`) as indicators of -emphasis. Text wrapped with one `*` or `_` will be wrapped with an -HTML `<em>` tag; double `*`'s or `_`'s will be wrapped with an HTML -`<strong>` tag. E.g., this input: - - *single asterisks* - - _single underscores_ - - **double asterisks** - - __double underscores__ - -will produce: - - <em>single asterisks</em> - - <em>single underscores</em> - - <strong>double asterisks</strong> - - <strong>double underscores</strong> - -You can use whichever style you prefer; the lone restriction is that -the same character must be used to open and close an emphasis span. - -Emphasis can be used in the middle of a word: - - un*frigging*believable - -But if you surround an `*` or `_` with spaces, it'll be treated as a -literal asterisk or underscore. - -To produce a literal asterisk or underscore at a position where it -would otherwise be used as an emphasis delimiter, you can backslash -escape it: - - \*this text is surrounded by literal asterisks\* - - - -<h3 id="code">Code</h3> - -To indicate a span of code, wrap it with backtick quotes (`` ` ``). -Unlike a pre-formatted code block, a code span indicates code within a -normal paragraph. For example: - - Use the `printf()` function. - -will produce: - - <p>Use the <code>printf()</code> function.</p> - -To include a literal backtick character within a code span, you can use -multiple backticks as the opening and closing delimiters: - - ``There is a literal backtick (`) here.`` - -which will produce this: - - <p><code>There is a literal backtick (`) here.</code></p> - -The backtick delimiters surrounding a code span may include spaces -- -one after the opening, one before the closing. This allows you to place -literal backtick characters at the beginning or end of a code span: - - A single backtick in a code span: `` ` `` - - A backtick-delimited string in a code span: `` `foo` `` - -will produce: - - <p>A single backtick in a code span: <code>`</code></p> - - <p>A backtick-delimited string in a code span: <code>`foo`</code></p> - -With a code span, ampersands and angle brackets are encoded as HTML -entities automatically, which makes it easy to include example HTML -tags. Markdown will turn this: - - Please don't use any `<blink>` tags. - -into: - - <p>Please don't use any <code><blink></code> tags.</p> - -You can write this: - - `—` is the decimal-encoded equivalent of `—`. - -to produce: - - <p><code>&#8212;</code> is the decimal-encoded - equivalent of <code>&mdash;</code>.</p> - - - -<h3 id="img">Images</h3> - -Admittedly, it's fairly difficult to devise a "natural" syntax for -placing images into a plain text document format. - -Markdown uses an image syntax that is intended to resemble the syntax -for links, allowing for two styles: *inline* and *reference*. - -Inline image syntax looks like this: - - ![Alt text](/path/to/img.jpg) - - ![Alt text](/path/to/img.jpg "Optional title") - -That is: - -* An exclamation mark: `!`; -* followed by a set of square brackets, containing the `alt` - attribute text for the image; -* followed by a set of parentheses, containing the URL or path to - the image, and an optional `title` attribute enclosed in double - or single quotes. - -Reference-style image syntax looks like this: - - ![Alt text][id] - -Where "id" is the name of a defined image reference. Image references -are defined using syntax identical to link references: - - [id]: url/to/image "Optional title attribute" - -As of this writing, Markdown has no syntax for specifying the -dimensions of an image; if this is important to you, you can simply -use regular HTML `<img>` tags. - - -* * * - - -<h2 id="misc">Miscellaneous</h2> - -<h3 id="autolink">Automatic Links</h3> - -Markdown supports a shortcut style for creating "automatic" links for URLs and email addresses: simply surround the URL or email address with angle brackets. What this means is that if you want to show the actual text of a URL or email address, and also have it be a clickable link, you can do this: - - <http://example.com/> - -Markdown will turn this into: - - <a href="http://example.com/">http://example.com/</a> - -Automatic links for email addresses work similarly, except that -Markdown will also perform a bit of randomized decimal and hex -entity-encoding to help obscure your address from address-harvesting -spambots. For example, Markdown will turn this: - - <address@example.com> - -into something like this: - - <a href="mailto:addre - ss@example.co - m">address@exa - mple.com</a> - -which will render in a browser as a clickable link to "address@example.com". - -(This sort of entity-encoding trick will indeed fool many, if not -most, address-harvesting bots, but it definitely won't fool all of -them. It's better than nothing, but an address published in this way -will probably eventually start receiving spam.) - - - -<h3 id="backslash">Backslash Escapes</h3> - -Markdown allows you to use backslash escapes to generate literal -characters which would otherwise have special meaning in Markdown's -formatting syntax. For example, if you wanted to surround a word -with literal asterisks (instead of an HTML `<em>` tag), you can use -backslashes before the asterisks, like this: - - \*literal asterisks\* - -Markdown provides backslash escapes for the following characters: - - \ backslash - ` backtick - * asterisk - _ underscore - {} curly braces - [] square brackets - () parentheses - # hash mark - + plus sign - - minus sign (hyphen) - . dot - ! exclamation mark diff --git a/docs/using/index.md b/docs/using/index.md index fce812f6..0525eb1b 100644 --- a/docs/using/index.md +++ b/docs/using/index.md @@ -4,9 +4,7 @@ The following pages are examples meant to highlight the functionality of MyST documents. ```{toctree} -install.md +intro.md syntax.md -sphinx.md -benchmark.md use_api.md ``` diff --git a/docs/using/install.md b/docs/using/install.md deleted file mode 100644 index c19c4f2c..00000000 --- a/docs/using/install.md +++ /dev/null @@ -1,36 +0,0 @@ -# Installing the MyST Parser - -[![PyPI][pypi-badge]][pypi-link] -[![Conda][conda-badge]][conda-link] - -Installing the MyST parser provides access to two tools: - -* A MyST-to-docutils parser and renderer. -* A Sphinx parser that utilizes the above tool in building your documenation. - -To install the MyST parser, run the following in a -[Conda environment](https://docs.conda.io) (recommended): - -```bash -conda install -c conda-forge myst-parser -``` - -or - -```bash -pip install myst-parser[sphinx] -``` - -Or for package development: - -```bash -git clone https://github.com/ExecutableBookProject/MyST-Parser -cd MyST-Parser -git checkout master -pip install -e .[sphinx,code_style,testing,rtd] -``` - -[pypi-badge]: https://img.shields.io/pypi/v/myst-parser.svg -[pypi-link]: https://pypi.org/project/myst-parser -[conda-badge]: https://anaconda.org/conda-forge/myst-parser/badges/version.svg -[conda-link]: https://anaconda.org/conda-forge/myst-parser diff --git a/docs/using/intro.md b/docs/using/intro.md new file mode 100644 index 00000000..8a3b1857 --- /dev/null +++ b/docs/using/intro.md @@ -0,0 +1,72 @@ +# Getting Started + +## Installation + +[![PyPI][pypi-badge]][pypi-link] +[![Conda][conda-badge]][conda-link] + +Installing the MyST parser provides access to two tools: + +* A MyST-to-docutils parser and renderer. +* A Sphinx parser that utilizes the above tool in building your documenation. + +To install the MyST parser, run the following in a +[Conda environment](https://docs.conda.io) (recommended): + +```bash +conda install -c conda-forge myst-parser +``` + +or + +```bash +pip install myst-parser[sphinx] +``` + +Or for package development: + +```bash +git clone https://github.com/ExecutableBookProject/MyST-Parser +cd MyST-Parser +git checkout master +pip install -e .[sphinx,code_style,testing,rtd] +``` + +[pypi-badge]: https://img.shields.io/pypi/v/myst-parser.svg +[pypi-link]: https://pypi.org/project/myst-parser +[conda-badge]: https://anaconda.org/conda-forge/myst-parser/badges/version.svg +[conda-link]: https://anaconda.org/conda-forge/myst-parser + +## Parsing MyST with Sphinx + +Sphinx is a documentation generator for building a website or book from multiple source documents and assets. To get started with Sphinx, see their [Quickstart Guide](https://www.sphinx-doc.org/en/master/usage/quickstart.html). + +To use the MyST parser in Sphinx, simply add: `extensions = ["myst_parser"]` to your `conf.py` and all documents with the `.md` extension will be parsed as MyST. + +Naturally this site is generated with Sphinx and MyST! + +## Parsing Performance Benchmark + +MyST-Parser uses the fastest, __*CommonMark compliant*__, parser written in python! + + $ myst-benchmark -n 50 + Test document: spec.md + Test iterations: 50 + Running 6 test(s) ... + ===================== + [mistune (0.8.4): 5.52 s]* + markdown-it-py (0.2.3): 15.38 s + myst-parser:sphinx (0.8.0): 23.13 s + mistletoe (0.10.0): 16.92 s + commonmark.py (0.9.1): 35.61 s + python-markdown:extra (3.2.1): 66.89 s + +As already noted by [mistletoe](https://github.com/miyuchina/mistletoe#performance), +although Mistune is the fastest of the parsers, +this is because it does not strictly follow the CommonMark spec, +which outlines a highly context-sensitive grammar for Markdown. +The simpler approach taken by Mistune means that it cannot handle more +complex parsing cases, such as precedence of different types of tokens, escaping rules, etc. + +The MyST parser is slightly slower than the base markdown-it-py parser, due to the additional syntax which it parses and the conversion to docutils AST, +but even then it is still comparably performant to the other parsers parser. diff --git a/docs/using/sphinx.md b/docs/using/sphinx.md deleted file mode 100644 index 1600c902..00000000 --- a/docs/using/sphinx.md +++ /dev/null @@ -1,7 +0,0 @@ -# Parsing MyST with Sphinx - -Sphinx is a documentation generator for building a website or book from multiple source documents and assets. To get started with Sphinx, see their [Quickstart Guide](https://www.sphinx-doc.org/en/master/usage/quickstart.html). - -To use the MyST parser in Sphinx, simply add: `extensions = ["myst_parser"]` to your `conf.py` and all documents with the `.md` extension will be parsed as MyST. - -Naturally this site is generated with Sphinx and MyST! diff --git a/docs/using/use_api.md b/docs/using/use_api.md index 1f27ac55..7fb33aae 100644 --- a/docs/using/use_api.md +++ b/docs/using/use_api.md @@ -1,252 +1,245 @@ # Using `myst_parser` as an API -% TODO eventually this should be wrote as a notebook (with MyST-NB)! - MyST-Parser may be used as an API *via* the `myst_parser` package. ```{seealso} -- {ref}`Programmatic Use of Mistletoe <mistletoe:intro/api_use>` +- The [markdown-it-py](https://github.com/ExecutableBookProject/markdown-it-py) package - {ref}`The MyST-Parser API <api/main>` ``` The raw text is first parsed to syntax 'tokens', then these are converted to other formats using 'renderers'. -The simplest way to parse text is using: + +## Quick-Start + +The simplest way to understand how text will be parsed is using: ```python -from myst_parser import parse_text -parse_text("some *text*", "html") +from myst_parser.main import to_html +to_html("some *text*") ``` +<!-- #region --> ```html '<p>some <em>text</em></p>\n' ``` +<!-- #endregion --> -The output type can be one of: +```python +from myst_parser.main import to_docutils +print(to_docutils("some *text*").pformat()) +``` -- `dict` (a.k.a ast) -- `html` -- `docutils` -- `sphinx` +```xml +<document source="notset"> + <paragraph> + some + <emphasis> + text +``` -## Convert Text to Tokens +```python +from pprint import pprint +from myst_parser.main import to_tokens -To convert some text to tokens: +for token in to_tokens("some *text*"): + print(token) + print() +``` +<!-- #region --> ```python -from myst_parser import text_to_tokens -root = text_to_tokens(""" -Here's some *text* +Token(type='paragraph_open', tag='p', nesting=1, attrs=None, map=[0, 1], level=0, children=None, content='', markup='', info='', meta={}, block=True, hidden=False) -1. a list +Token(type='inline', tag='', nesting=0, attrs=None, map=[0, 1], level=1, children=[Token(type='text', tag='', nesting=0, attrs=None, map=None, level=0, children=None, content='some ', markup='', info='', meta={}, block=False, hidden=False), Token(type='em_open', tag='em', nesting=1, attrs=None, map=None, level=0, children=None, content='', markup='*', info='', meta={}, block=False, hidden=False), Token(type='text', tag='', nesting=0, attrs=None, map=None, level=1, children=None, content='text', markup='', info='', meta={}, block=False, hidden=False), Token(type='em_close', tag='em', nesting=-1, attrs=None, map=None, level=0, children=None, content='', markup='*', info='', meta={}, block=False, hidden=False)], content='some *text*', markup='', info='', meta={}, block=True, hidden=False) -> a *quote*""") -root +Token(type='paragraph_close', tag='p', nesting=-1, attrs=None, map=None, level=0, children=None, content='', markup='', info='', meta={}, block=True, hidden=False) ``` +<!-- #endregion --> -```python -Document(children=3, link_definitions=0, front_matter=None) -``` +# The Parser -All non-terminal tokens may contain children: + +The `default_parser` function loads a standard markdown-it parser with the default syntax rules for MyST. ```python -root.children +from myst_parser.main import default_parser +parser = default_parser("html") +parser ``` +<!-- #region --> ```python -[Paragraph(children=2, position=(2, 2)), - List(children=1, loose=False, start_at=1, position=(3, 4)), - Quote(children=1, position=(6, 6))] +markdown_it.main.MarkdownIt() ``` +<!-- #endregion --> -Then each token has attributes specific to its type: +```python +pprint(parser.get_active_rules()) +``` +<!-- #region --> ```python -list_token = root.children[1] -list_token.__dict__ +{'block': ['front_matter', + 'table', + 'code', + 'math_block_eqno', + 'math_block', + 'fence', + 'myst_line_comment', + 'blockquote', + 'myst_block_break', + 'myst_target', + 'hr', + 'list', + 'footnote_def', + 'reference', + 'heading', + 'lheading', + 'html_block', + 'paragraph'], + 'core': ['normalize', 'block', 'inline'], + 'inline': ['text', + 'newline', + 'math_inline', + 'math_single', + 'escape', + 'myst_role', + 'backticks', + 'emphasis', + 'link', + 'image', + 'footnote_ref', + 'autolink', + 'html_inline', + 'entity'], + 'inline2': ['balance_pairs', 'emphasis', 'text_collapse']} ``` +<!-- #endregion --> ```python -{'children': [{'children': [{'children': [RawText()], 'position': [4, 4]}], - 'loose': False, - 'leader': '1.', - 'prepend': 3, - 'next_marker': None, - 'position': [3, 4]}], - 'loose': False, - 'start_at': 1, - 'position': [3, 4]} +parser.render("*abc*") ``` -You can also recursively traverse the syntax tree, yielding `WalkItem`s that contain the element, its parent and depth from the source token: +<!-- #region --> +```html +'<p><em>abc</em></p>\n' +``` +<!-- #endregion --> + +Any of these rules can be disabled: ```python -from pprint import pprint -tree = [ - (t.parent.__class__.__name__, t.node.__class__.__name__, t.depth) - for t in root.walk() -] -pprint(tree) +parser.disable("emphasis").render("*abc*") +``` + +<!-- #region --> +```html +'<p>*abc*</p>\n' ``` +<!-- #endregion --> + +`renderInline` turns off any block syntax rules. ```python -[('Document', 'Paragraph', 1), - ('Document', 'List', 1), - ('Document', 'Quote', 1), - ('Paragraph', 'RawText', 2), - ('Paragraph', 'Emphasis', 2), - ('List', 'ListItem', 2), - ('Quote', 'Paragraph', 2), - ('Emphasis', 'RawText', 3), - ('ListItem', 'Paragraph', 3), - ('Paragraph', 'RawText', 3), - ('Paragraph', 'Emphasis', 3), - ('Paragraph', 'RawText', 4), - ('Emphasis', 'RawText', 4)] +parser.enable("emphasis").renderInline("- *abc*") +``` + +<!-- #region --> +```html +'- <em>abc</em>' ``` +<!-- #endregion --> -## JSON Renderer +## The Token Stream -The `myst_parser.json_renderer.JsonRenderer` converts a token to a nested dictionary representation. + + + +The text is parsed to a flat token stream: ```python -from json import loads -from myst_parser import render_tokens -from myst_parser.json_renderer import JsonRenderer +from myst_parser.main import to_tokens +tokens = to_tokens(""" +Here's some *text* -pprint(loads(render_tokens(root, JsonRenderer))) +1. a list + +> a *quote*""") +[t.type for t in tokens] ``` +<!-- #region --> ```python -{'children': [{'children': [{'content': "Here's some ", - 'position': [2, 2], - 'type': 'RawText'}, - {'children': [{'content': 'text', - 'position': [2, 2], - 'type': 'RawText'}], - 'position': [2, 2], - 'type': 'Emphasis'}], - 'position': [2, 2], - 'type': 'Paragraph'}, - {'children': [{'children': [{'children': [{'content': 'a list', - 'position': [4, 4], - 'type': 'RawText'}], - 'position': [4, 4], - 'type': 'Paragraph'}], - 'leader': '1.', - 'loose': False, - 'next_marker': None, - 'position': [3, 4], - 'prepend': 3, - 'type': 'ListItem'}], - 'loose': False, - 'position': [3, 4], - 'start_at': 1, - 'type': 'List'}, - {'children': [{'children': [{'content': 'a ', - 'position': [7, 7], - 'type': 'RawText'}, - {'children': [{'content': 'quote', - 'position': [7, 7], - 'type': 'RawText'}], - 'position': [7, 7], - 'type': 'Emphasis'}], - 'position': [7, 7], - 'type': 'Paragraph'}], - 'position': [6, 6], - 'type': 'Quote'}], - 'front_matter': None, - 'link_definitions': {}, - 'type': 'Document'} -``` - -## HTML Renderer - -The `myst_parser.html_renderer.HTMLRenderer` converts a token directly to HTML. +['paragraph_open', + 'inline', + 'paragraph_close', + 'ordered_list_open', + 'list_item_open', + 'paragraph_open', + 'inline', + 'paragraph_close', + 'list_item_close', + 'ordered_list_close', + 'blockquote_open', + 'paragraph_open', + 'inline', + 'paragraph_close', + 'blockquote_close'] +``` +<!-- #endregion --> -```python -from myst_parser import render_tokens -from myst_parser.html_renderer import HTMLRenderer +Inline type tokens contain the inline tokens as children: -print(render_tokens(root, HTMLRenderer)) +```python +tokens[6] ``` -```html -<p>Here's some <em>text</em></p> -<ol> -<li>a list</li> -</ol> -<blockquote> -<p>a <em>quote</em></p> -</blockquote> +<!-- #region --> +```python +Token(type='inline', tag='', nesting=0, attrs=None, map=[3, 4], level=3, children=[Token(type='text', tag='', nesting=0, attrs=None, map=None, level=0, children=None, content='a list', markup='', info='', meta={}, block=False, hidden=False)], content='a list', markup='', info='', meta={}, block=True, hidden=False) ``` +<!-- #endregion --> -`````{note} -This render will not actually 'assess' roles and directives, -just represent their raw content: +The sphinx renderer first converts the token to a nested structure, collapsing the opening/closing tokens into single tokens: -````python -other = text_to_tokens(""" -{role:name}`content` +```python +from markdown_it.token import nest_tokens +nested = nest_tokens(tokens) +[t.type for t in nested] +``` -```{directive_name} arg -:option: a -content +<!-- #region --> +```python +['paragraph_open', 'ordered_list_open', 'blockquote_open'] ``` -""") +<!-- #endregion --> -print(render_tokens(other, HTMLRenderer)) -```` +```python +print(nested[0].opening, end="\n\n") +print(nested[0].closing, end="\n\n") +print(nested[0].children, end="\n\n") +``` -````html -<p><span class="myst-role"><code>{role:name}content</code></span></p> -<div class="myst-directive"> -<pre><code>{directive_name} arg -:option: a -content -</code></pre></span> -</div> -```` -````` +<!-- #region --> +```python +Token(type='paragraph_open', tag='p', nesting=1, attrs=None, map=[1, 2], level=0, children=None, content='', markup='', info='', meta={}, block=True, hidden=False) -You can also create a minmal page preview, including CSS: +Token(type='paragraph_close', tag='p', nesting=-1, attrs=None, map=None, level=0, children=None, content='', markup='', info='', meta={}, block=True, hidden=False) -```python -parse_text( - in_string, - "html", - add_mathjax=True, - as_standalone=True, - add_css=dedent( - """\ - div.myst-front-matter { - border: 1px solid gray; - } - div.myst-directive { - background: lightgreen; - } - hr.myst-block-break { - border-top:1px dotted black; - } - span.myst-role { - background: lightgreen; - } - """ - ), -) +[Token(type='inline', tag='', nesting=0, attrs=None, map=[1, 2], level=1, children=[Token(type='text', tag='', nesting=0, attrs=None, map=None, level=0, children=None, content="Here's some ", markup='', info='', meta={}, block=False, hidden=False), NestedTokens(opening=Token(type='em_open', tag='em', nesting=1, attrs=None, map=None, level=0, children=None, content='', markup='*', info='', meta={}, block=False, hidden=False), closing=Token(type='em_close', tag='em', nesting=-1, attrs=None, map=None, level=0, children=None, content='', markup='*', info='', meta={}, block=False, hidden=False), children=[Token(type='text', tag='', nesting=0, attrs=None, map=None, level=1, children=None, content='text', markup='', info='', meta={}, block=False, hidden=False)])], content="Here's some *text*", markup='', info='', meta={}, block=True, hidden=False)] ``` +<!-- #endregion --> -## Docutils Renderer +## Renderers The `myst_parser.docutils_renderer.DocutilsRenderer` converts a token directly to the `docutils.document` representation of the document, converting roles and directives to a `docutils.nodes` if a converter can be found for the given name. -````python -from myst_parser import render_tokens -from myst_parser.docutils_renderer import DocutilsRenderer +```python +from myst_parser.main import to_docutils -root = text_to_tokens(""" +document = to_docutils(""" Here's some *text* 1. a list @@ -260,9 +253,8 @@ content ``` """) -document = render_tokens(root, DocutilsRenderer) print(document.pformat()) -```` +``` ```xml <document source="notset"> @@ -287,19 +279,13 @@ print(document.pformat()) content ``` -## Sphinx Renderer -The `myst_parser.docutils_renderer.SphinxRenderer` builds on the `DocutilsRenderer` to add sphinx specific nodes, e.g. for cross-referencing between documents. +The `myst_parser.sphinx_renderer.SphinxRenderer` builds on the `DocutilsRenderer` to add sphinx specific nodes, e.g. for cross-referencing between documents. -```{note} -To use sphinx specific roles and directives outside of a `sphinx-build`, they must first be loaded with the `load_sphinx_env=True` option. -``` +To use the sphinx specific roles and directives outside of a `sphinx-build`, they must first be loaded with the `in_sphinx_env` option. ````python -from myst_parser import text_to_tokens, render_tokens -from myst_parser.docutils_renderer import SphinxRenderer - -root = text_to_tokens(""" +document = to_docutils(""" Here's some *text* 1. a list @@ -312,9 +298,8 @@ Here's some *text* name definition ``` -""") - -document = render_tokens(root, SphinxRenderer, load_sphinx_env=True) +""", + in_sphinx_env=True) print(document.pformat()) ```` @@ -349,6 +334,7 @@ print(document.pformat()) definition ``` + You can also set Sphinx configuration *via* `sphinx_conf`. This is a dictionary representation of the contents of the Sphinx `conf.py`. ```{warning} @@ -358,10 +344,7 @@ Sphinx build process and/or access to external files. ``` `````python -from myst_parser import text_to_tokens, render_tokens -from myst_parser.docutils_renderer import SphinxRenderer - -root = text_to_tokens(""" +document = to_docutils(""" ````{tabs} ```{tab} Apples @@ -369,9 +352,10 @@ root = text_to_tokens(""" Apples are green, or sometimes red. ``` ```` -""") - -document = render_tokens(root, SphinxRenderer, load_sphinx_env=True, sphinx_conf={"extensions": ["sphinx_tabs.tabs"]}) +""", + in_sphinx_env=True, + conf={"extensions": ["sphinx_tabs.tabs"]} +) print(document.pformat()) ````` diff --git a/myst_parser/main.py b/myst_parser/main.py index 657d641f..bb4ff2b6 100644 --- a/myst_parser/main.py +++ b/myst_parser/main.py @@ -8,10 +8,6 @@ from markdown_it.extensions.texmath import texmath_plugin from markdown_it.extensions.footnote import footnote_plugin -from docutils.nodes import document as docutils_doc -from myst_parser.docutils_renderer import DocutilsRenderer -from myst_parser.docutils_renderer import make_document - from . import __version__ # noqa: F401 @@ -19,14 +15,18 @@ def default_parser( renderer="sphinx", disable_syntax=(), math_delimiters="dollars" ) -> MarkdownIt: """Return the default parser configuration for MyST""" - from myst_parser.sphinx_renderer import SphinxRenderer + if renderer == "sphinx": + from myst_parser.sphinx_renderer import SphinxRenderer + + renderer_cls = SphinxRenderer + elif renderer == "html": + renderer_cls = RendererHTML + elif renderer == "docutils": + from myst_parser.docutils_renderer import DocutilsRenderer - renderers = { - "sphinx": SphinxRenderer, - "docutils": DocutilsRenderer, - "html": RendererHTML, - } - renderer_cls = renderers[renderer] + renderer_cls = DocutilsRenderer + else: + raise ValueError("unknown renderer type: {0}".format(renderer)) md = ( MarkdownIt("commonmark", renderer_cls=renderer_cls) @@ -52,12 +52,13 @@ def to_docutils( text: str, options=None, env=None, - document: docutils_doc = None, + document=None, renderer="sphinx", in_sphinx_env: bool = False, + conf=None, disable_syntax: List[str] = (), math_delimiters: str = "dollars", -) -> docutils_doc: +): """Render text to the docutils AST :param text: the text to render @@ -68,7 +69,10 @@ def to_docutils( :param in_sphinx_env: initialise a minimal sphinx environment (useful for testing) :param disable_syntax: list of syntax element names to disable + :returns: docutils document """ + from myst_parser.docutils_renderer import make_document + md = default_parser( renderer=renderer, disable_syntax=disable_syntax, @@ -80,7 +84,7 @@ def to_docutils( if in_sphinx_env: from myst_parser.sphinx_renderer import mock_sphinx_env - with mock_sphinx_env(document=md.options["document"]): + with mock_sphinx_env(conf=conf, document=md.options["document"]): return md.render(text, env) else: return md.render(text, env) @@ -92,5 +96,5 @@ def to_html(text: str, env=None): def to_tokens(text: str, env=None): - md = default_parser() + md = default_parser("html") return md.parse(text, env) diff --git a/myst_parser/sphinx_renderer.py b/myst_parser/sphinx_renderer.py index d5e80b40..9bad488a 100644 --- a/myst_parser/sphinx_renderer.py +++ b/myst_parser/sphinx_renderer.py @@ -132,8 +132,8 @@ def mock_sphinx_env(conf=None, srcdir=None, document=None): """Set up an environment, to parse sphinx roles/directives, outside of a `sphinx-build`. - :param sphinx_conf: a dictionary representation of the sphinx `conf.py` - :param sphinx_srcdir: a path to a source directory + :param conf: a dictionary representation of the sphinx `conf.py` + :param srcdir: a path to a source directory (for example, can be used for `include` statements) This primarily copies the code in `sphinx.util.docutils.docutils_namespace` From f20333d99e8f60b4e7a7151fef827f0aeb554318 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 15:42:13 +0000 Subject: [PATCH 14/32] Improve coverage --- .../{role_options.md => directive_options.md} | 37 ++++++++++++++----- .../fixtures/syntax_elements.md | 13 ++++++- tests/test_renderers/test_fixtures.py | 4 +- 3 files changed, 41 insertions(+), 13 deletions(-) rename tests/test_renderers/fixtures/{role_options.md => directive_options.md} (84%) diff --git a/tests/test_renderers/fixtures/role_options.md b/tests/test_renderers/fixtures/directive_options.md similarity index 84% rename from tests/test_renderers/fixtures/role_options.md rename to tests/test_renderers/fixtures/directive_options.md index 55f2b79f..e3edabc4 100644 --- a/tests/test_renderers/fixtures/role_options.md +++ b/tests/test_renderers/fixtures/directive_options.md @@ -1,4 +1,4 @@ -Test Role 1: +Test Directive 1: . ```{restructuredtext-test-directive} ``` @@ -10,7 +10,7 @@ Test Role 1: . ----------------------------- -Test Role 2: +Test Directive 2: . ```{restructuredtext-test-directive} foo @@ -25,7 +25,7 @@ foo . ----------------------------- -Test Role 3: +Test Directive 3: . ```{restructuredtext-test-directive} foo ``` @@ -37,7 +37,7 @@ Test Role 3: . ----------------------------- -Test Role 4: +Test Directive 4: . ```{restructuredtext-test-directive} foo bar @@ -52,7 +52,7 @@ bar . ----------------------------- -Test Role 5: +Test Directive 5: . ```{restructuredtext-test-directive} foo bar ``` @@ -64,7 +64,7 @@ Test Role 5: . ----------------------------- -Test Role 6: +Test Directive 6: . ```{restructuredtext-test-directive} foo bar baz @@ -79,7 +79,7 @@ baz . ----------------------------- -Test Role 7: +Test Directive 7: . ```{restructuredtext-test-directive} @@ -95,7 +95,7 @@ foo . ----------------------------- -Test Role Options 1: +Test Directive Options 1: . ```{restructuredtext-test-directive} --- @@ -114,7 +114,7 @@ foo . ----------------------------- -Test Role Options 2: +Test Directive Options 2: . ```{restructuredtext-test-directive} :option1: a @@ -131,7 +131,7 @@ foo . ----------------------------- -Test Role Options Error: +Test Directive Options Error: . ```{restructuredtext-test-directive} :option1 @@ -152,3 +152,20 @@ foo :option2: b foo . + + +----------------------------- +Unknown Directive: +. +```{unknown} +``` +. +<document source="notset"> + <system_message level="3" line="1" source="notset" type="ERROR"> + <paragraph> + Unknown directive type 'unknown' + <system_message level="1" line="1" source="notset" type="INFO"> + <paragraph> + Problem retrieving directive entry from language module 'en': 'str' object has no attribute 'directives'. + Trying "unknown" as canonical directive name. +. \ No newline at end of file diff --git a/tests/test_renderers/fixtures/syntax_elements.md b/tests/test_renderers/fixtures/syntax_elements.md index bdad8be3..7b3ad665 100644 --- a/tests/test_renderers/fixtures/syntax_elements.md +++ b/tests/test_renderers/fixtures/syntax_elements.md @@ -104,8 +104,19 @@ Heading Levels: d . + -------------------------- Block Code: +. + foo +. +<document source="notset"> + <literal_block language="none" xml:space="preserve"> + foo +. + +-------------------------- +Fenced Code: . ```sh foo @@ -117,7 +128,7 @@ foo . -------------------------- -Block Code no language: +Fenced Code no language: . ``` foo diff --git a/tests/test_renderers/test_fixtures.py b/tests/test_renderers/test_fixtures.py index cb157442..f7f3a55c 100644 --- a/tests/test_renderers/test_fixtures.py +++ b/tests/test_renderers/test_fixtures.py @@ -33,9 +33,9 @@ def test_tables(line, title, input, expected): @pytest.mark.parametrize( "line,title,input,expected", - read_fixture_file(FIXTURE_PATH.joinpath("role_options.md")), + read_fixture_file(FIXTURE_PATH.joinpath("directive_options.md")), ) -def test_role_options(line, title, input, expected): +def test_directive_options(line, title, input, expected): document = to_docutils(input) print(document.pformat()) assert "\n".join( From e65142bec48fdcce1a181f9499300d7915930e68 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 15:56:31 +0000 Subject: [PATCH 15/32] improve test coverage --- tests/test_renderers/test_fixtures.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/test_renderers/test_fixtures.py b/tests/test_renderers/test_fixtures.py index f7f3a55c..8bfbbbb1 100644 --- a/tests/test_renderers/test_fixtures.py +++ b/tests/test_renderers/test_fixtures.py @@ -4,10 +4,16 @@ from markdown_it.utils import read_fixture_file from myst_parser.main import to_docutils +from myst_parser.sphinx_renderer import mock_sphinx_env FIXTURE_PATH = Path(__file__).parent.joinpath("fixtures") +def test_minimal_sphinx(): + with mock_sphinx_env(conf={"author": "bob geldof"}) as app: + assert app.config["author"] == "bob geldof" + + @pytest.mark.parametrize( "line,title,input,expected", read_fixture_file(FIXTURE_PATH.joinpath("syntax_elements.md")), From 89a177980148c418607918deda93bda0a72a5356 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 16:16:18 +0000 Subject: [PATCH 16/32] Add test --- .../fixtures/syntax_elements.md | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/tests/test_renderers/fixtures/syntax_elements.md b/tests/test_renderers/fixtures/syntax_elements.md index 7b3ad665..4e3d6cda 100644 --- a/tests/test_renderers/fixtures/syntax_elements.md +++ b/tests/test_renderers/fixtures/syntax_elements.md @@ -572,6 +572,26 @@ c: {"d": 2} . +-------------------------- +Front Matter Bad Yaml: +. +--- +a: { +--- +. +<document source="notset"> + <system_message level="3" line="2" source="notset" type="ERROR"> + <paragraph> + Front matter block: + while parsing a flow node + expected the node content, but found '<stream end>' + in "<unicode string>", line 1, column 5: + a: { + ^ + <literal_block xml:space="preserve"> + a: { +. + -------------------------- Full Test: . From 6214cd9be0ad347f16866860ea0c5d808557f206 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 27 Mar 2020 16:52:06 +0000 Subject: [PATCH 17/32] Update use_api.md --- docs/using/use_api.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/using/use_api.md b/docs/using/use_api.md index 7fb33aae..19625355 100644 --- a/docs/using/use_api.md +++ b/docs/using/use_api.md @@ -10,7 +10,6 @@ MyST-Parser may be used as an API *via* the `myst_parser` package. The raw text is first parsed to syntax 'tokens', then these are converted to other formats using 'renderers'. - ## Quick-Start The simplest way to understand how text will be parsed is using: From 626e5c59e0ae812b5eb17575c31c332bf17278fc Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 28 Mar 2020 12:51:05 +0000 Subject: [PATCH 18/32] Bump markdown-it-py version --- setup.py | 2 +- tests/test_commonmark/test_commonmark.py | 19 +++++-------------- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/setup.py b/setup.py index 836f7947..a4d4e6b5 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ ], keywords="markdown lexer parser development docutils sphinx", python_requires=">=3.6", - install_requires=["markdown-it-py~=0.3.2"], + install_requires=["markdown-it-py~=0.4"], extras_require={ "sphinx": ["pyyaml", "docutils>=0.15", "sphinx>=2,<3"], "code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"], diff --git a/tests/test_commonmark/test_commonmark.py b/tests/test_commonmark/test_commonmark.py index ca2c8b75..27943e2c 100644 --- a/tests/test_commonmark/test_commonmark.py +++ b/tests/test_commonmark/test_commonmark.py @@ -24,24 +24,15 @@ def test_commonmark(entry): pytest.skip( "Thematic breaks on the first line conflict with front matter syntax" ) - if entry["example"] in [108, 334]: - # TODO fix failing empty code span tests (awaiting upstream); - # ``` ``` -> <code> </code> not <code></code> - pytest.skip("empty code span spacing") - if entry["example"] in [ - 171, # [foo]: /url\\bar\\*baz \"foo\\\"bar\\baz\"\n\n[foo]\n - 306, # <http://example.com?find=\\*>\n - 308, # [foo](/bar\\* \"ti\\*tle\")\n - 309, # [foo]\n\n[foo]: /bar\\* \"ti\\*tle\"\n - 310, # ``` foo\\+bar\nfoo\n```\n - 502, # [link](/url \"title \\\""\")\n - 599, # <http://example.com/\\[\\>\n - ]: - # TODO fix url backslash escaping (awaiting upstream) + if entry["example"] == 599: # <http://example.com/\\[\\>\n + # TODO awaiting upstream fix pytest.skip("url backslash escaping") test_case = entry["markdown"] output = to_html(test_case) + if entry["example"] == 593: + # this doesn't have any bearing on the output + output = output.replace("mailto", "MAILTO") if entry["example"] in [187, 209, 210]: # this doesn't have any bearing on the output output = output.replace( From d132cd2d2fea9a22e5d323c187aa6229f45dd57b Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 28 Mar 2020 16:35:42 +0000 Subject: [PATCH 19/32] Test reporter warnings --- myst_parser/docutils_renderer.py | 47 +++++++++----- myst_parser/mocking.py | 2 +- setup.py | 2 +- .../fixtures/directive_options.md | 5 +- .../fixtures/reporter_warnings.md | 65 +++++++++++++++++++ .../fixtures/syntax_elements.md | 2 +- tests/test_renderers/test_error_reporting.py | 27 ++++++++ 7 files changed, 128 insertions(+), 22 deletions(-) create mode 100644 tests/test_renderers/fixtures/reporter_warnings.md create mode 100644 tests/test_renderers/test_error_reporting.py diff --git a/myst_parser/docutils_renderer.py b/myst_parser/docutils_renderer.py index 208f2a03..5833ac21 100644 --- a/myst_parser/docutils_renderer.py +++ b/myst_parser/docutils_renderer.py @@ -102,10 +102,21 @@ def render(self, tokens: List[Token], options, env: AttrDict): if f"render_{token.type}" in self.rules: self.rules[f"render_{token.type}"](token) else: - # TODO make this reporter warning - print(f"no render method for: {token.type}") + self.current_node.append( + self.reporter.warning( + f"No render method for: {token.type}", line=token.map[0] + ) + ) - # TODO log warning for duplicate reference definitions + # log warnings for duplicate reference definitions + # "duplicate_refs": [{"href": "ijk", "label": "B", "map": [4, 5], "title": ""}], + for dup_ref in self.env.get("duplicate_refs", []): + self.document.append( + self.reporter.warning( + f"Duplicate reference definition: {dup_ref['label']}", + line=dup_ref["map"][0] + 1, + ) + ) if not self.config.get("output_footnotes", True): return self.document @@ -119,6 +130,8 @@ def render(self, tokens: List[Token], options, env: AttrDict): if refnode["refname"] not in foot_refs: foot_refs[refnode["refname"]] = True + # TODO log warning for duplicate footnote definitions + if foot_refs: self.current_node.append(nodes.transition()) for footref in foot_refs: @@ -166,8 +179,11 @@ def nested_render_text(self, text: str, lineno: int, disable_front_matter=True): if f"render_{token.type}" in self.rules: self.rules[f"render_{token.type}"](token) else: - # TODO make this reporter warning - print(f"no render method for: {token.type}") + self.current_node.append( + self.reporter.warning( + f"No render method for: {token.type}", line=token.map[0] + ) + ) @contextmanager def current_node_context(self, node, append: bool = False): @@ -189,7 +205,7 @@ def render_children(self, token): def add_line_and_source_path(self, node, token): """Copy the line number and document source path to the docutils node.""" try: - node.line = token.map[0] + 1 + node.line = token.map[0] except (AttributeError, TypeError): pass node.source = self.document["source"] @@ -402,11 +418,10 @@ def render_link_open(self, token): self.render_children(token) def handle_cross_reference(self, token, destination): - # TODO use the docutils error reporting mechanisms, rather than raising if not self.config.get("ignore_missing_refs", False): - raise NotImplementedError( - "reference not found in current document: {} (lines: {})".format( - destination, token.map + self.current_node.append( + self.reporter.warning( + f"Reference not found: {destination}", line=token.map[0] ) ) @@ -426,7 +441,8 @@ def render_image(self, token): img_node = nodes.image() self.add_line_and_source_path(img_node, token) img_node["uri"] = token.attrGet("src") - # TODO ideally we would render proper markup here + # TODO ideally we would render proper markup here, + # this probably requires an upstream change in sphinx img_node["alt"] = self.renderInlineAsText(token.children) self.current_node.append(img_node) @@ -451,7 +467,7 @@ def render_front_matter(self, token): data = yaml.safe_load(token.content) except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error: msg_node = self.reporter.error( - "Front matter block:\n" + str(error), line=token.map[0] + 1 + "Front matter block:\n" + str(error), line=token.map[0] ) msg_node += nodes.literal_block(token.content, token.content) self.current_node += [msg_node] @@ -606,7 +622,7 @@ def render_directive(self, token: Token): ) # type: (Directive, list) if not directive_class: error = self.reporter.error( - "Unknown directive type '{}'\n".format(name), + 'Unknown directive type "{}".\n'.format(name), # nodes.literal_block(content, content), line=position, ) @@ -619,7 +635,7 @@ def render_directive(self, token: Token): ) except DirectiveParsingError as error: error = self.reporter.error( - "Directive '{}':\n{}".format(name, error), + "Directive '{}': {}".format(name, error), nodes.literal_block(content, content), line=position, ) @@ -669,7 +685,7 @@ def render_directive(self, token: Token): result = [msg_node] except MockingError as exc: error = self.reporter.error( - "Directive '{}' cannot be mocked:\n{}: {}".format( + "Directive '{}' cannot be mocked: {}: {}".format( name, exc.__class__.__name__, exc ), nodes.literal_block(content, content), @@ -691,7 +707,6 @@ def render_directive(self, token: Token): def dict_to_docinfo(data): """Render a key/val pair as a docutils field node.""" - # TODO this data could be used to support default option values for directives docinfo = nodes.docinfo() for key, value in data.items(): diff --git a/myst_parser/mocking.py b/myst_parser/mocking.py index 3c069bae..456d6d55 100644 --- a/myst_parser/mocking.py +++ b/myst_parser/mocking.py @@ -295,7 +295,7 @@ def run(self): except Exception as error: raise DirectiveError( 4, - 'Directive "{}": error reading file: {}\n{error}.'.format( + 'Directive "{}": error reading file: {}\n{}.'.format( self.name, path, error ), ) diff --git a/setup.py b/setup.py index a4d4e6b5..74640ac1 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ ], keywords="markdown lexer parser development docutils sphinx", python_requires=">=3.6", - install_requires=["markdown-it-py~=0.4"], + install_requires=["markdown-it-py~=0.4.1"], extras_require={ "sphinx": ["pyyaml", "docutils>=0.15", "sphinx>=2,<3"], "code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"], diff --git a/tests/test_renderers/fixtures/directive_options.md b/tests/test_renderers/fixtures/directive_options.md index e3edabc4..60ef8409 100644 --- a/tests/test_renderers/fixtures/directive_options.md +++ b/tests/test_renderers/fixtures/directive_options.md @@ -142,8 +142,7 @@ foo <document source="notset"> <system_message level="3" line="1" source="notset" type="ERROR"> <paragraph> - Directive 'restructuredtext-test-directive': - Invalid options YAML: mapping values are not allowed here + Directive 'restructuredtext-test-directive': Invalid options YAML: mapping values are not allowed here in "<unicode string>", line 2, column 8: option2: b ^ @@ -163,7 +162,7 @@ Unknown Directive: <document source="notset"> <system_message level="3" line="1" source="notset" type="ERROR"> <paragraph> - Unknown directive type 'unknown' + Unknown directive type "unknown". <system_message level="1" line="1" source="notset" type="INFO"> <paragraph> Problem retrieving directive entry from language module 'en': 'str' object has no attribute 'directives'. diff --git a/tests/test_renderers/fixtures/reporter_warnings.md b/tests/test_renderers/fixtures/reporter_warnings.md new file mode 100644 index 00000000..1d0d858d --- /dev/null +++ b/tests/test_renderers/fixtures/reporter_warnings.md @@ -0,0 +1,65 @@ +Duplicate Reference definitions: +. +[a]: b +[a]: c +. +source/path:2: (WARNING/2) Duplicate reference definition: A +. + +Missing Reference: +. +[a](b) +. +source/path:1: (WARNING/2) Reference not found: b +. + +Unknown role: +. +abc + +{xyz}`a` +. +source/path:3: (ERROR/3) Unknown interpreted text role "xyz". +. + +Unknown directive: +. + +```{xyz} +``` +. +source/path:2: (ERROR/3) Unknown directive type "xyz". +. + +Bad Front Matter: +. +--- +a: { +--- +. +source/path:1: (ERROR/3) Front matter block: +while parsing a flow node +expected the node content, but found '<stream end>' + in "<unicode string>", line 1, column 5: + a: { + ^ +. + +Directive parsing error: +. + +```{class} +``` +. +source/path:2: (ERROR/3) Directive 'class': 1 argument(s) required, 0 supplied +. + +Directive run error: +. + +```{date} +x +``` +. +source/path:2: (ERROR/3) Invalid context: the "date" directive can only be used within a substitution definition. +. diff --git a/tests/test_renderers/fixtures/syntax_elements.md b/tests/test_renderers/fixtures/syntax_elements.md index 4e3d6cda..942f0bda 100644 --- a/tests/test_renderers/fixtures/syntax_elements.md +++ b/tests/test_renderers/fixtures/syntax_elements.md @@ -580,7 +580,7 @@ a: { --- . <document source="notset"> - <system_message level="3" line="2" source="notset" type="ERROR"> + <system_message level="3" line="1" source="notset" type="ERROR"> <paragraph> Front matter block: while parsing a flow node diff --git a/tests/test_renderers/test_error_reporting.py b/tests/test_renderers/test_error_reporting.py new file mode 100644 index 00000000..2c333d09 --- /dev/null +++ b/tests/test_renderers/test_error_reporting.py @@ -0,0 +1,27 @@ +from pathlib import Path + +import pytest + +from markdown_it.utils import read_fixture_file +from myst_parser.docutils_renderer import make_document +from myst_parser.main import to_docutils + + +FIXTURE_PATH = Path(__file__).parent.joinpath("fixtures") + + +@pytest.mark.parametrize( + "line,title,input,expected", + read_fixture_file(FIXTURE_PATH.joinpath("reporter_warnings.md")), +) +def test_basic(line, title, input, expected): + document = make_document("source/path") + messages = [] + + def observer(msg_node): + if msg_node["level"] > 1: + messages.append(msg_node.astext()) + + document.reporter.attach_observer(observer) + to_docutils(input, document=document, renderer="docutils") + assert "\n".join(messages).rstrip() == expected.rstrip() From 4fcf732f55eacd9b15d8fe336fa24fe764c426ea Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 28 Mar 2020 17:32:50 +0000 Subject: [PATCH 20/32] Add tests and fixes for reporter warnings and include directive --- myst_parser/docutils_renderer.py | 15 ++--- myst_parser/main.py | 13 ++-- myst_parser/mocking.py | 4 +- myst_parser/parse_directives.py | 2 + tests/test_renderers/fixtures/mock_include.md | 62 +++++++++++++++++++ .../fixtures/mock_include_errors.md | 24 +++++++ .../test_renderers/test_include_directive.py | 43 +++++++++++++ 7 files changed, 144 insertions(+), 19 deletions(-) create mode 100644 tests/test_renderers/fixtures/mock_include.md create mode 100644 tests/test_renderers/fixtures/mock_include_errors.md create mode 100644 tests/test_renderers/test_include_directive.py diff --git a/myst_parser/docutils_renderer.py b/myst_parser/docutils_renderer.py index 5833ac21..c44d8edc 100644 --- a/myst_parser/docutils_renderer.py +++ b/myst_parser/docutils_renderer.py @@ -139,18 +139,11 @@ def render(self, tokens: List[Token], options, env: AttrDict): return self.document - def nested_render_text(self, text: str, lineno: int, disable_front_matter=True): + def nested_render_text(self, text: str, lineno: int): """Render unparsed text.""" - - if disable_front_matter: - # parse without front matter - with self.md.reset_rules(): - self.md.disable("front_matter", True) - tokens = self.md.parse(text, self.env) - else: - tokens = self.md.parse(text, self.env) - if tokens[0].type == "front_matter": - tokens.pop(0) + tokens = self.md.parse(text + "\n", self.env) + if tokens and tokens[0].type == "front_matter": + tokens.pop(0) # set correct line numbers for token in tokens: diff --git a/myst_parser/main.py b/myst_parser/main.py index bb4ff2b6..e7424b26 100644 --- a/myst_parser/main.py +++ b/myst_parser/main.py @@ -52,12 +52,13 @@ def to_docutils( text: str, options=None, env=None, - document=None, + disable_syntax: List[str] = (), + math_delimiters: str = "dollars", renderer="sphinx", + document=None, in_sphinx_env: bool = False, conf=None, - disable_syntax: List[str] = (), - math_delimiters: str = "dollars", + srcdir=None, ): """Render text to the docutils AST @@ -65,9 +66,11 @@ def to_docutils( :param options: options to update the parser with :param env: The sandbox environment for the parse (will contain e.g. reference definitions) + :param disable_syntax: list of syntax element names to disable :param document: the docutils root node to use (otherwise a new one will be created) :param in_sphinx_env: initialise a minimal sphinx environment (useful for testing) - :param disable_syntax: list of syntax element names to disable + :param conf: the sphinx conf.py as a dictionary + :param srcdir: to parse to the mock sphinc env :returns: docutils document """ @@ -84,7 +87,7 @@ def to_docutils( if in_sphinx_env: from myst_parser.sphinx_renderer import mock_sphinx_env - with mock_sphinx_env(conf=conf, document=md.options["document"]): + with mock_sphinx_env(conf=conf, srcdir=srcdir, document=md.options["document"]): return md.render(text, env) else: return md.render(text, env) diff --git a/myst_parser/mocking.py b/myst_parser/mocking.py index 456d6d55..0b6e0f92 100644 --- a/myst_parser/mocking.py +++ b/myst_parser/mocking.py @@ -374,9 +374,7 @@ def run(self): self.renderer.document["source"] = str(path) self.renderer.reporter.source = str(path) self.renderer.reporter.get_source_and_line = lambda l: (str(path), l) - self.renderer.nested_render_text( - file_content, startline, disable_front_matter=False - ) + self.renderer.nested_render_text(file_content, startline + 1) finally: self.renderer.document["source"] = source self.renderer.reporter.source = rsource diff --git a/myst_parser/parse_directives.py b/myst_parser/parse_directives.py index 5d0366b9..e9a1ee0e 100644 --- a/myst_parser/parse_directives.py +++ b/myst_parser/parse_directives.py @@ -134,6 +134,8 @@ def parse_directive_options( options_spec = directive_class.option_spec # type: Dict[str, Callable] for name, value in list(options.items()): convertor = options_spec.get(name, None) + if value is True: + value = "" # flag converter requires no argument if convertor is None: raise DirectiveParsingError("Unknown option: {}".format(name)) try: diff --git a/tests/test_renderers/fixtures/mock_include.md b/tests/test_renderers/fixtures/mock_include.md new file mode 100644 index 00000000..32d779e5 --- /dev/null +++ b/tests/test_renderers/fixtures/mock_include.md @@ -0,0 +1,62 @@ +Basic Include: +. +```{include} other.md +``` +. +<document source="tmpdir/test.md"> + <paragraph> + a + + b + + c +. + +Include with Front Matter (should be ignored): +. +```{include} fmatter.md +``` +. +<document source="tmpdir/test.md"> + <paragraph> + b +. + +Include Literal: +. +```{include} other.md +:literal: True +``` +. +<document source="tmpdir/test.md"> + <literal_block source="tmpdir/other.md" xml:space="preserve"> + a + b + c +. + +Include Literal, line range: +. +```{include} other.md +:literal: True +:start-line: 1 +:end-line: 2 +``` +. +<document source="tmpdir/test.md"> + <literal_block source="tmpdir/other.md" xml:space="preserve"> + b +. + +Include code: +. +```{include} other.md +:code: md +``` +. +<document source="tmpdir/test.md"> + <literal_block classes="code md" source="tmpdir/other.md" xml:space="preserve"> + a + b + c +. diff --git a/tests/test_renderers/fixtures/mock_include_errors.md b/tests/test_renderers/fixtures/mock_include_errors.md new file mode 100644 index 00000000..de35edd5 --- /dev/null +++ b/tests/test_renderers/fixtures/mock_include_errors.md @@ -0,0 +1,24 @@ +Missing path: +. +```{include} +``` +. +tmpdir/test.md:1: (ERROR/3) Directive 'include': 1 argument(s) required, 0 supplied +. + +Non-existent path: +. +```{include} other.md +``` +. +tmpdir/test.md:1: (SEVERE/4) Directive "include": error reading file: tmpdir/other.md +[Errno 2] No such file or directory: 'tmpdir/other.md'. +. + +Error in include file: +. +```{include} bad.md +``` +. +tmpdir/bad.md:1: (ERROR/3) Unknown interpreted text role "a". +. diff --git a/tests/test_renderers/test_include_directive.py b/tests/test_renderers/test_include_directive.py new file mode 100644 index 00000000..21a0225f --- /dev/null +++ b/tests/test_renderers/test_include_directive.py @@ -0,0 +1,43 @@ +from pathlib import Path + +import pytest + +from markdown_it.utils import read_fixture_file +from myst_parser.docutils_renderer import make_document +from myst_parser.main import to_docutils + + +FIXTURE_PATH = Path(__file__).parent.joinpath("fixtures") + + +@pytest.mark.parametrize( + "line,title,input,expected", + read_fixture_file(FIXTURE_PATH.joinpath("mock_include.md")), +) +def test_render(line, title, input, expected, tmp_path): + tmp_path.joinpath("other.md").write_text("a\nb\nc") + tmp_path.joinpath("fmatter.md").write_text("---\na: 1\n---\nb") + document = make_document(str(tmp_path / "test.md")) + to_docutils(input, document=document, in_sphinx_env=True, srcdir=str(tmp_path)) + output = document.pformat().replace(str(tmp_path), "tmpdir").rstrip() + print(output) + assert output == expected.rstrip() + + +@pytest.mark.parametrize( + "line,title,input,expected", + read_fixture_file(FIXTURE_PATH.joinpath("mock_include_errors.md")), +) +def test_errors(line, title, input, expected, tmp_path): + tmp_path.joinpath("bad.md").write_text("{a}`b`") + document = make_document(str(tmp_path / "test.md")) + messages = [] + + def observer(msg_node): + if msg_node["level"] > 1: + messages.append(msg_node.astext().replace(str(tmp_path), "tmpdir")) + + document.reporter.attach_observer(observer) + document.reporter.halt_level = 6 + to_docutils(input, document=document, in_sphinx_env=True, srcdir=str(tmp_path)) + assert "\n".join(messages).rstrip() == expected.rstrip() From cd83c70c52b16da5e6c7c0a25c710564d8288e8a Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 28 Mar 2020 19:53:44 +0000 Subject: [PATCH 21/32] Add documentation of sphinx parser options --- docs/api/renderers.rst | 2 +- docs/api/sphinx_parser.rst | 5 +++-- docs/using/intro.md | 21 +++++++++++++++++++++ 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/docs/api/renderers.rst b/docs/api/renderers.rst index 0229bbb9..6e982bfd 100644 --- a/docs/api/renderers.rst +++ b/docs/api/renderers.rst @@ -38,7 +38,7 @@ These classes are parsed to sphinx roles and directives, to mimic the original docutls rST specific parser elements, but instead run nested parsing with the markdown parser. -. autoclass:: myst_parser.mocking.MockInliner +.. autoclass:: myst_parser.mocking.MockInliner :members: :undoc-members: :show-inheritance: diff --git a/docs/api/sphinx_parser.rst b/docs/api/sphinx_parser.rst index f7334b3e..7d663895 100644 --- a/docs/api/sphinx_parser.rst +++ b/docs/api/sphinx_parser.rst @@ -7,7 +7,8 @@ This class builds on the :py:class:`~myst_parser.sphinx_renderer.SphinxRenderer` to generate a parser for Sphinx, using the :ref:`Sphinx parser API <sphinx:parser-api>`: .. autoclass:: myst_parser.sphinx_parser.MystParser - :members: - :no-undoc-members: + :members: default_config, supported, parse + :undoc-members: + :member-order: bysource :show-inheritance: :exclude-members: __init__ diff --git a/docs/using/intro.md b/docs/using/intro.md index 8a3b1857..2e2469e6 100644 --- a/docs/using/intro.md +++ b/docs/using/intro.md @@ -45,6 +45,27 @@ To use the MyST parser in Sphinx, simply add: `extensions = ["myst_parser"]` to Naturally this site is generated with Sphinx and MyST! +Some configuration options are also available using `myst_config` in your `conf.py`. +You can currently change the math bracket setting, and disable parsing of any of the syntax elements: + +```python +extensions = ["myst_parser"] +myst_config = {"disable_syntax": ["emphasis"], "math_delimiters": "brackets"} +``` + +```md +*emphasis is now disabled* + +\[a=1\] +``` + +```{seealso} +The {py:class}`~myst_parser.sphinx_parser.MystParser` class API +and +[markdown-it-py](https://github.com/ExecutableBookProject/markdown-it-py) +for the list of syntax elements (known as rules) that you can disable. +``` + ## Parsing Performance Benchmark MyST-Parser uses the fastest, __*CommonMark compliant*__, parser written in python! From 77af8e1b7c961a090c6fc84d67eaa47c2455ea13 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 28 Mar 2020 19:57:22 +0000 Subject: [PATCH 22/32] bump version for alpha release --- myst_parser/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/myst_parser/__init__.py b/myst_parser/__init__.py index f0e09a84..0fe55568 100644 --- a/myst_parser/__init__.py +++ b/myst_parser/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.8.0" +__version__ = "0.9.0a1" def setup(app): From 95a0404b01a1bee43acd216586d757f3822b5756 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 28 Mar 2020 20:05:39 +0000 Subject: [PATCH 23/32] Apply doc fixes suggested by @rossbar in #121 --- docs/develop/test_infrastructure.md | 2 +- docs/index.md | 2 +- docs/using/syntax.md | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/develop/test_infrastructure.md b/docs/develop/test_infrastructure.md index 536ecbd0..0479e8d9 100644 --- a/docs/develop/test_infrastructure.md +++ b/docs/develop/test_infrastructure.md @@ -12,7 +12,7 @@ The tests are run using [pytest](https://docs.pytest.org)/[GitHub Actions](https The tests are ordered in a hierarchical fashion: -1. In `tests/test_commonmark` the [CommonMark](https://github.com/commonmark/CommonMark.git) test set is run; to check that the parser is complying with the CommonMark specification. +1. In `tests/test_commonmark` the [CommonMark](https://github.com/commonmark/CommonMark.git) test set is run to check that the parser is complying with the CommonMark specification. 2. In `tests/test_renderers` are tests that check that the Markdown AST is being correctly converted to the docutils/sphinx AST. This includes testing that roles and directives are correctly parsed and run. 3. In `tests/test_sphinx` are tests that check that minimal sphinx project builds are running correctly, to convert MyST markdown files to HTML. 4. In `.circleci` the package documentation (written in MyST format) is built and tested for build errors/warnings. diff --git a/docs/index.md b/docs/index.md index b481b3dd..c01aa4e9 100644 --- a/docs/index.md +++ b/docs/index.md @@ -10,7 +10,7 @@ MyST syntax and {doc}`Sphinx <sphinx:intro>`. This allows for native markdown su directives. ```{warning} -The MyST parser is in an alpha stage, and may have breaking syntax to its implementation +The MyST parser is in an alpha stage, and may have breaking changes to its implementation and to the syntax that it supports. Use at your own risk. If you find any issues, please report them [in the MyST issues](https://github.com/ExecutableBookProject/meta/issues/24) diff --git a/docs/using/syntax.md b/docs/using/syntax.md index 5e6dd7f0..1924ec89 100644 --- a/docs/using/syntax.md +++ b/docs/using/syntax.md @@ -450,7 +450,7 @@ print('yep!') ## Roles - an in-line extension point Roles are similar to directives - they allow you to define arbitrary new -functionality in Sphinx, but they are use *in-line*. To define an in-line +functionality in Sphinx, but they are used *in-line*. To define an in-line role, use the following form: ````{list-table} @@ -600,8 +600,8 @@ This is the best equation {eq}`eqn:best` ### Front Matter This is a YAML block at the start of the document, as used for example in -[jekyll](https://jekyllrb.com/docs/front-matter/). Sphinx intercepts this data and -stores it within the global environment (as discussed +[jekyll](https://jekyllrb.com/docs/front-matter/). Sphinx intercepts these data and +stores them within the global environment (as discussed [here](https://www.sphinx-doc.org/en/master/usage/restructuredtext/field-lists.html)). A classic use-case is to specify 'orphan' documents, that are not specified in any @@ -634,7 +634,7 @@ Is below, but it won't be parsed into the document. % my comment ````{important} -Since comments are a block level entity, they will terminate the previous block. +Since comments are a block-level entity, they will terminate the previous block. In practical terms, this means that the following lines will be broken up into two paragraphs, resulting in a new line between them: From 034be2a335457e305f60bfa8156866e01c37fe77 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 28 Mar 2020 20:09:42 +0000 Subject: [PATCH 24/32] Update __init__.py --- myst_parser/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/myst_parser/__init__.py b/myst_parser/__init__.py index 0fe55568..f9d77e17 100644 --- a/myst_parser/__init__.py +++ b/myst_parser/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.9.0a1" +__version__ = "0.8.0a1" def setup(app): From c97d2d7a5c628cf6b1f29c37715bf57477dae5cb Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sun, 29 Mar 2020 08:37:44 +0100 Subject: [PATCH 25/32] bump markdown-it-py version --- myst_parser/__init__.py | 2 +- myst_parser/docutils_renderer.py | 21 ++++++++++++--------- setup.py | 2 +- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/myst_parser/__init__.py b/myst_parser/__init__.py index f9d77e17..09954a5f 100644 --- a/myst_parser/__init__.py +++ b/myst_parser/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.8.0a1" +__version__ = "0.8.0a2" def setup(app): diff --git a/myst_parser/docutils_renderer.py b/myst_parser/docutils_renderer.py index c44d8edc..5fed37fd 100644 --- a/myst_parser/docutils_renderer.py +++ b/myst_parser/docutils_renderer.py @@ -456,15 +456,18 @@ def render_front_matter(self, token): since `process_doc` just converts them back to text. """ - try: - data = yaml.safe_load(token.content) - except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error: - msg_node = self.reporter.error( - "Front matter block:\n" + str(error), line=token.map[0] - ) - msg_node += nodes.literal_block(token.content, token.content) - self.current_node += [msg_node] - return + if not isinstance(token.content, dict): + try: + data = yaml.safe_load(token.content) + except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error: + msg_node = self.reporter.error( + "Front matter block:\n" + str(error), line=token.map[0] + ) + msg_node += nodes.literal_block(token.content, token.content) + self.current_node += [msg_node] + return + else: + data = token.content docinfo = dict_to_docinfo(data) self.current_node.append(docinfo) diff --git a/setup.py b/setup.py index 74640ac1..a2791fbe 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ ], keywords="markdown lexer parser development docutils sphinx", python_requires=">=3.6", - install_requires=["markdown-it-py~=0.4.1"], + install_requires=["markdown-it-py~=0.4.2"], extras_require={ "sphinx": ["pyyaml", "docutils>=0.15", "sphinx>=2,<3"], "code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"], From 81d0bfb3774ad6b0df66dc9f23c351a11a7495f6 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sun, 29 Mar 2020 09:22:42 +0100 Subject: [PATCH 26/32] Bump markdown-it-py version --- myst_parser/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/myst_parser/__init__.py b/myst_parser/__init__.py index 09954a5f..4ff6ada4 100644 --- a/myst_parser/__init__.py +++ b/myst_parser/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.8.0a2" +__version__ = "0.8.0a3" def setup(app): diff --git a/setup.py b/setup.py index a2791fbe..614c160f 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ ], keywords="markdown lexer parser development docutils sphinx", python_requires=">=3.6", - install_requires=["markdown-it-py~=0.4.2"], + install_requires=["markdown-it-py~=0.4.3"], extras_require={ "sphinx": ["pyyaml", "docutils>=0.15", "sphinx>=2,<3"], "code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"], From e64898a84c3e8971f3df8683b49728fb32f793b3 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sun, 29 Mar 2020 17:49:04 +0100 Subject: [PATCH 27/32] Update conf.py --- docs/conf.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/conf.py b/docs/conf.py index dbad9387..42ef0493 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -52,6 +52,9 @@ # html_theme = "pydata_sphinx_theme" html_logo = "_static/logo.png" +html_theme_options = { + "github_url": "https://github.com/ExecutableBookProject/MyST-Parser" +} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, From d53a9b5c01e08a2c462973d64ed81633f6f59dcf Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Mon, 30 Mar 2020 04:58:01 +0100 Subject: [PATCH 28/32] Add title to image node --- myst_parser/docutils_renderer.py | 10 ++++++++-- tests/test_renderers/fixtures/docutil_directives.md | 4 +++- tests/test_renderers/fixtures/sphinx_directives.md | 13 +++++++++---- tests/test_renderers/fixtures/syntax_elements.md | 2 +- 4 files changed, 21 insertions(+), 8 deletions(-) diff --git a/myst_parser/docutils_renderer.py b/myst_parser/docutils_renderer.py index 5fed37fd..8481d6f8 100644 --- a/myst_parser/docutils_renderer.py +++ b/myst_parser/docutils_renderer.py @@ -193,7 +193,11 @@ def render_children(self, token): if f"render_{child.type}" in self.rules: self.rules[f"render_{child.type}"](child) else: - print(f"no render method for: {child.type}") + self.current_node.append( + self.reporter.warning( + f"No render method for: {child.type}", line=child.map[0] + ) + ) def add_line_and_source_path(self, node, token): """Copy the line number and document source path to the docutils node.""" @@ -437,7 +441,9 @@ def render_image(self, token): # TODO ideally we would render proper markup here, # this probably requires an upstream change in sphinx img_node["alt"] = self.renderInlineAsText(token.children) - + title = token.attrGet("title") + if title: + img_node["title"] = token.attrGet("title") self.current_node.append(img_node) # ### render methods for plugin tokens diff --git a/tests/test_renderers/fixtures/docutil_directives.md b/tests/test_renderers/fixtures/docutil_directives.md index e61a4601..32792501 100644 --- a/tests/test_renderers/fixtures/docutil_directives.md +++ b/tests/test_renderers/fixtures/docutil_directives.md @@ -294,10 +294,12 @@ a image (`docutils.parsers.rst.directives.images.Image`): . ```{image} path/to/image +:alt: abc +:name: name ``` . <document source="notset"> - <image uri="path/to/image"> + <image alt="abc" ids="name" names="name" uri="path/to/image"> . -------------------------------- diff --git a/tests/test_renderers/fixtures/sphinx_directives.md b/tests/test_renderers/fixtures/sphinx_directives.md index e856684f..e87e9123 100644 --- a/tests/test_renderers/fixtures/sphinx_directives.md +++ b/tests/test_renderers/fixtures/sphinx_directives.md @@ -236,7 +236,7 @@ figure (`sphinx.directives.patches.Figure`): . ```{figure} path/to/figure -caption +*caption* legend ``` @@ -245,7 +245,8 @@ legend <figure> <image uri="path/to/figure"> <caption> - caption + <emphasis> + caption <legend> <paragraph> legend @@ -265,7 +266,8 @@ foo -------------------------------- table (`sphinx.directives.patches.RSTTable`): . -```{table} +```{table} *title* +:name: name | a | b | |---|---| @@ -273,7 +275,10 @@ table (`sphinx.directives.patches.RSTTable`): ``` . <document source="notset"> - <table classes="colwidths-auto"> + <table classes="colwidths-auto" ids="name" names="name"> + <title> + <emphasis> + title <tgroup cols="2"> <colspec colwidth="50.0"> <colspec colwidth="50.0"> diff --git a/tests/test_renderers/fixtures/syntax_elements.md b/tests/test_renderers/fixtures/syntax_elements.md index 942f0bda..7d4f8d1d 100644 --- a/tests/test_renderers/fixtures/syntax_elements.md +++ b/tests/test_renderers/fixtures/syntax_elements.md @@ -156,7 +156,7 @@ Image with alt and title: . <document source="notset"> <paragraph> - <image alt="alt" uri="src"> + <image alt="alt" title="title" uri="src"> . -------------------------- From 826015fa20c8a0090d579b86eeda3b1d900a8220 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Mon, 30 Mar 2020 05:23:08 +0100 Subject: [PATCH 29/32] Add warning for non-consecutive headings --- myst_parser/docutils_renderer.py | 13 +++++++++++-- tests/test_renderers/fixtures/reporter_warnings.md | 8 ++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/myst_parser/docutils_renderer.py b/myst_parser/docutils_renderer.py index 8481d6f8..dccad4aa 100644 --- a/myst_parser/docutils_renderer.py +++ b/myst_parser/docutils_renderer.py @@ -216,6 +216,17 @@ def add_section(self, section, level): for section_level in self._level_to_elem if level > section_level ) + + if (level > parent_level) and (parent_level + 1 != level): + self.current_node.append( + self.reporter.warning( + "Non-consecutive header level increase; {} to {}".format( + parent_level, level + ), + line=section.line, + ) + ) + parent = self._level_to_elem[parent_level] parent.append(section) self._level_to_elem[level] = section @@ -356,8 +367,6 @@ def render_heading_open(self, token): self.add_line_and_source_path(new_section, token) new_section.append(title_node) - # TODO add extra virtual section for non-consecutive levels - # (e.g. 1 to 3) or raise warning? self.add_section(new_section, level) self.current_node = title_node diff --git a/tests/test_renderers/fixtures/reporter_warnings.md b/tests/test_renderers/fixtures/reporter_warnings.md index 1d0d858d..cc48be2b 100644 --- a/tests/test_renderers/fixtures/reporter_warnings.md +++ b/tests/test_renderers/fixtures/reporter_warnings.md @@ -63,3 +63,11 @@ x . source/path:2: (ERROR/3) Invalid context: the "date" directive can only be used within a substitution definition. . + +Non-consecutive headings: +. +# title 1 +### title 3 +. +source/path:2: (WARNING/2) Non-consecutive header level increase; 1 to 3 +. From 63f0e3e6d883aa9d51927fbe8a79d00febcd9a09 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Mon, 30 Mar 2020 06:17:08 +0100 Subject: [PATCH 30/32] test fix --- tests/test_renderers/fixtures/syntax_elements.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_renderers/fixtures/syntax_elements.md b/tests/test_renderers/fixtures/syntax_elements.md index 7d4f8d1d..bdeb08cf 100644 --- a/tests/test_renderers/fixtures/syntax_elements.md +++ b/tests/test_renderers/fixtures/syntax_elements.md @@ -333,7 +333,7 @@ Referencing: (target)= Title ------ +===== [alt1](target) From 73db682a311b6ca62e0b729d2e13bdeb2a4169cd Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Mon, 30 Mar 2020 13:00:07 +0100 Subject: [PATCH 31/32] Update bug_report.md --- .github/ISSUE_TEMPLATE/bug_report.md | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index adb23c5d..79a0e3a7 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -20,8 +20,17 @@ Steps to reproduce the behavior: **Expected behavior** A clear and concise description of what you expected to happen. -**Screenshots** -If applicable, add screenshots to help explain your problem. +If relevant, a minimal example of the input text should be supplied, +together with a screen-shot of the output Sphinx document and/or command-line output, e.g. + +```markdown +some text... +``` + +```console +$ make html +ERROR ... +``` **Environment (please complete the following information):** - Python Version [e.g. 3.7.1] From 60ea3a248ab12a2305b7ecb54e3deb8ccf56d84f Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Wed, 1 Apr 2020 07:02:53 +0100 Subject: [PATCH 32/32] Bump markdown-it version (allow multi-line inline math) --- myst_parser/__init__.py | 2 +- myst_parser/sphinx_renderer.py | 2 +- setup.py | 2 +- .../fixtures/syntax_elements.md | 29 +++++++++++++++++++ 4 files changed, 32 insertions(+), 3 deletions(-) diff --git a/myst_parser/__init__.py b/myst_parser/__init__.py index 4ff6ada4..5ee5a1e1 100644 --- a/myst_parser/__init__.py +++ b/myst_parser/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.8.0a3" +__version__ = "0.8.0a5" def setup(app): diff --git a/myst_parser/sphinx_renderer.py b/myst_parser/sphinx_renderer.py index 9bad488a..148fb18b 100644 --- a/myst_parser/sphinx_renderer.py +++ b/myst_parser/sphinx_renderer.py @@ -1,6 +1,7 @@ from contextlib import contextmanager import copy from urllib.parse import unquote +import tempfile from typing import cast from docutils import nodes @@ -115,7 +116,6 @@ def __init__(self, confoverrides=None, srcdir=None): self.preload_builder(buildername) self.config.init_values() self.events.emit("config-inited", self.config) - import tempfile with tempfile.TemporaryDirectory() as tempdir: # creating a builder attempts to make the doctreedir diff --git a/setup.py b/setup.py index 614c160f..736c99f9 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ ], keywords="markdown lexer parser development docutils sphinx", python_requires=">=3.6", - install_requires=["markdown-it-py~=0.4.3"], + install_requires=["markdown-it-py~=0.4.5"], extras_require={ "sphinx": ["pyyaml", "docutils>=0.15", "sphinx>=2,<3"], "code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"], diff --git a/tests/test_renderers/fixtures/syntax_elements.md b/tests/test_renderers/fixtures/syntax_elements.md index bdeb08cf..a4a13d4d 100644 --- a/tests/test_renderers/fixtures/syntax_elements.md +++ b/tests/test_renderers/fixtures/syntax_elements.md @@ -265,6 +265,35 @@ $foo$ foo . +-------------------------- +Inline Math, multi-line: +. +a $foo +bar$ b +. +<document source="notset"> + <paragraph> + a + <math> + foo + bar + b +. + +-------------------------- +Inline Math, multi-line with line break (invalid): +. +a $foo + +bar$ b +. +<document source="notset"> + <paragraph> + a $foo + <paragraph> + bar$ b +. + -------------------------- Math Block: .