diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml
index 1e57cd6a..663ae658 100644
--- a/.github/workflows/python.yaml
+++ b/.github/workflows/python.yaml
@@ -6,10 +6,18 @@ on:
branches: [ master ]
jobs:
build:
- runs-on: ubuntu-latest
strategy:
matrix:
- python-version: [pypy3, 3.5, 3.6, 3.7, 3.8, 3.9]
+ os: [ubuntu-latest, macos-latest, windows-latest]
+ python-version: [pypy3, 3.5, 3.6, 3.7, 3.8, 3.9, "3.10"]
+ exclude:
+ # apparently win/mac don't have pypy3
+ - os: windows-latest
+ python-version: pypy3
+ - os: macos-latest
+ python-version: pypy3
+ runs-on: ${{ matrix.os }}
+
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
@@ -19,7 +27,10 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- pip install Pygments>=2.5.2
+ pip install Pygments>=2.5.2 pytest
- name: Test
+ env:
+ PYTHONPATH: lib
run: |
- make testone
+ py.test -k "not knownfailure" test
+
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..4cfdfd7a
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,85 @@
+[tool.pytest.ini_options]
+# collect all tags with:
+# cat test/tm-cases/*tags \
+# | sed 's/#.*//' \
+# | tr ' ' '\n' \
+# | sort \
+# | uniq \
+# | tr '-' '_' \
+# | awk '{print " \"" $1 "\","}'
+
+markers = [
+ "blank",
+ "code_color",
+ "code_as_com",
+ "code_friendly",
+ "cuddle_lists",
+ "dontcare",
+ "emacs",
+ "eol",
+ "escape",
+ "extra",
+ "extras",
+ "fenced_code_blocks",
+ "footnotes",
+ "fromphpmarkdown",
+ "header_ids",
+ "highlightjs_lang",
+ "html_classes",
+ "htmlentities",
+ "indentation",
+ "issue104",
+ "issue113",
+ "issue127",
+ "issue135",
+ "issue15",
+ "issue16",
+ "issue165",
+ "issue18",
+ "issue21",
+ "issue213",
+ "issue216",
+ "issue24",
+ "issue26",
+ "issue27",
+ "issue3",
+ "issue30",
+ "issue33",
+ "issue36",
+ "issue42",
+ "issue52",
+ "issue54",
+ "issue57",
+ "issue67",
+ "issue7",
+ "issue74",
+ "issue76",
+ "issue78",
+ "issue84",
+ "issue86",
+ "issue87",
+ "issue9",
+ "issue90",
+ "knownfailure",
+ "link_patterns",
+ "markdown_in_html",
+ "metadata",
+ "nofollow",
+ "pi",
+ "pygments",
+ "pyshell",
+ "questionable",
+ "safe_mode",
+ "smarty_pants",
+ "smedberg",
+ "strike",
+ "tables",
+ "tag_friendly",
+ "task_list",
+ "toc",
+ "underline",
+ "unicode",
+ "wiki_tables",
+ "xinclude",
+ "xml",
+]
diff --git a/test/README.md b/test/README.md
index 8c510658..154e9bba 100644
--- a/test/README.md
+++ b/test/README.md
@@ -1,3 +1,5 @@
+## Introduction
+
This directory holds test suite. There are a number of test sets, each in its own directory:
- **tm-cases**: Cases I wrote while writing markdown2.py. Many of these are
@@ -9,80 +11,59 @@ This directory holds test suite. There are a number of test sets, each in its ow
- **php-markdown-extra-cases**: Test cases included in the MDTest package
(same as above) testing extra Markdown syntax that only PHP Markdown implements.
+## markdown2.py test results
-# markdown2.py test results
-
-To run the test suite:
+To run all the tests:
- python test.py [TAGS...]
+ pytest -vss test
+ (or within the test dir)
+ pytest -vss .
-The test driver used (testlib.py) allows one to filter the tests run via short
-strings that identify specific or groups of tests. Run `python test.py -l` to
+The test driver used (pytest) allows one to filter the tests run via short
+strings that identify specific or groups of tests. Run `pytest test --list` to
list all available tests and their names/tags. I use the "knownfailure" tag to
mark those tests that I know fail (e.g. the `php-markdown-extra-cases` all fail
because markdown2.py doesn't implement those additions to the Markdown syntax).
To run the test suite **without** the known failures:
- $ python test.py -- -knownfailure
- markdown2/tm/auto_link ... ok
- markdown2/tm/blockquote ... ok
- markdown2/tm/blockquote_with_pre ... ok
- markdown2/tm/code_block_with_tabs [fromphpmarkdown] ... ok
- markdown2/tm/code_safe_emphasis [code_safe] ... ok
- markdown2/tm/codeblock ... ok
- markdown2/tm/codespans ... ok
- markdown2/tm/emphasis ... ok
- markdown2/tm/escapes ... ok
- markdown2/tm/header ... ok
- markdown2/tm/hr ... ok
- markdown2/tm/inline_links ... ok
- markdown2/tm/lists ... ok
- markdown2/tm/nested_list ... ok
- markdown2/tm/parens_in_url_4 [fromphpmarkdown] ... ok
- markdown2/tm/raw_html ... ok
- markdown2/tm/ref_links ... ok
- markdown2/tm/safe_mode ... ok
- markdown2/tm/sublist-para [questionable] ... ok
- markdown2/tm/tricky_anchors ... ok
- markdown2/tm/underline_in_autolink ... ok
- markdown2/markdowntest/amps_and_angle_encoding ... ok
- markdown2/markdowntest/auto_links ... ok
- markdown2/markdowntest/backslash_escapes ... ok
- markdown2/markdowntest/blockquotes_with_code_blocks ... ok
- markdown2/markdowntest/hard-wrapped_paragraphs_with_list-like_lines ... ok
- markdown2/markdowntest/horizontal_rules ... ok
- markdown2/markdowntest/inline_html_simple ... ok
- markdown2/markdowntest/inline_html_comments ... ok
- markdown2/markdowntest/links_inline_style ... ok
- markdown2/markdowntest/links_reference_style ... ok
- markdown2/markdowntest/literal_quotes_in_titles ... ok
- markdown2/markdowntest/markdown_documentation_basics ... ok
- markdown2/markdowntest/markdown_documentation_syntax ... ok
- markdown2/markdowntest/nested_blockquotes ... ok
- markdown2/markdowntest/ordered_and_unordered_lists ... ok
- markdown2/markdowntest/strong_and_em_together ... ok
- markdown2/markdowntest/tabs ... ok
- markdown2/phpmarkdown/backslash_escapes ... ok
- markdown2/phpmarkdown/code_spans ... ok
- markdown2/phpmarkdown/email_auto_links ... ok
- markdown2/phpmarkdown/headers ... ok
- markdown2/phpmarkdown/images_untitled ... ok
- markdown2/phpmarkdown/inline_html_comments ... ok
- markdown2/phpmarkdown/ins_&_del ... ok
- markdown2/phpmarkdown/links_inline_style ... ok
- markdown2/phpmarkdown/md5_hashes ... ok
- markdown2/phpmarkdown/php-specific_bugs ... ok
- markdown2/phpmarkdown/tight_blocks ... ok
- markdown2/direct/code_in_strong [code, strong] ... ok
- markdown2/direct/pre ... ok
- markdown2/direct/starter_pre [pre, recipes] ... ok
-
- ----------------------------------------------------------------------
- Ran 52 tests in 0.799s
-
- OK
-
-
-TODO: Add details about which tests in the various test sets that markdown2.py
+    $ pytest test_rendering.py -m "not knownfailure"
+ test/test_rendering.py::test_render[tm-cases-CVE-2018-5773.text] PASSED
+ test/test_rendering.py::test_render[tm-cases-ampersands.text] PASSED
+ test/test_rendering.py::test_render[tm-cases-auto_link.text] FAILED
+ test/test_rendering.py::test_render[tm-cases-auto_link_email_with_underscore.text] FAILED
+ ....
+ ....
+ 5 failed, 146 passed, 15 deselected in 0.33s
+
+
+## Examples
+
+List all tests (and tags):
+
+ pytest test --list
+
+Run all tests:
+
+ pytest -vss test
+
+Run a single (named) test:
+
+ pytest -s test/test_rendering.py::test_render[tm-cases-codespans.text]
+
+Run all tests with a particular flag:
+
+ pytest -vvs test -m pygments
+
+Run all tests with flags matching an expression:
+
+ pytest -vvs test -m "pygments and not fenced_code_blocks"
+
+
+**NOTE**: All the commands are executed from the topmost checkout directory, with PYTHONPATH set to lib.
+
+**NOTE**: pass the -vv flag to display verbose logging and the -s flag to prevent pytest from capturing stdout/stderr (for debugging)
+
+
+**TODO**: Add details about which tests in the various test sets that markdown2.py
fails... and why I'm not concerned about them.
diff --git a/test/api.doctests b/test/api.doctests
deleted file mode 100644
index c04470ce..00000000
--- a/test/api.doctests
+++ /dev/null
@@ -1,17 +0,0 @@
-
->>> import markdown2
->>> hasattr(markdown2, "__version__")
-True
->>> hasattr(markdown2, "__version_info__")
-True
-
->>> str( markdown2.markdown("*boo*") )
-'
boo
\n'
-
->>> m = markdown2.Markdown()
->>> str( m.convert("*boo*") )
-'boo
\n'
-
->>> m = markdown2.MarkdownWithExtras()
->>> str( m.convert("*boo*") )
-'boo
\n'
diff --git a/test/conftest.py b/test/conftest.py
new file mode 100644
index 00000000..ba15707e
--- /dev/null
+++ b/test/conftest.py
@@ -0,0 +1,90 @@
+import re # pylint: disable=unused-import
+import os
+import warnings
+from pathlib import Path
+
+import pytest
+
+ALL_SUBDIRS = []
+
+
+def _gather_md_tests(srcdir):
+ def sanitize(txt):
+ return txt.replace(" ", "").replace("-", "_").replace(".", "_")
+
+ class Item:
+ def __init__(self, src):
+ self.src = src
+ self.expected = src.with_suffix(".html")
+ assert self.expected.exists()
+
+ @property
+ def markers(self):
+ src = self.src.with_suffix(".tags")
+ if src.exists():
+ marks = []
+ with src.open() as fp:
+ for line in fp:
+ if not line.strip() or line.strip().startswith("#"):
+ continue
+ marks.extend(line.partition("#")[0].split())
+ return [sanitize(m) for m in marks]
+
+ @property
+ def options(self):
+ src = self.src.with_suffix(".opts")
+ if not src.exists():
+ return
+ with warnings.catch_warnings():
+            # files such as link_patterns_double_hit.opts trigger "invalid escape sequence \s"
+ warnings.simplefilter("ignore")
+ return eval(src.read_text())
+
+ items = {}
+ for path in srcdir.glob("*.text"):
+ name = "{parent}-{name}".format(parent=srcdir.name, name=path.with_suffix('').name)
+ assert name not in items
+ items[name] = Item(path)
+ return items
+
+
+def parametrize(arguments, subdir):
+ global ALL_SUBDIRS
+ datadir = Path(os.getenv("DATADIR", Path(__file__).parent)).absolute()
+ srcdir = datadir / subdir
+
+ ALL_SUBDIRS.append(srcdir)
+ items = _gather_md_tests(srcdir)
+
+ # we build the parametrized
+ def _fn(fn):
+ parameters = []
+ for name, item in sorted(items.items()):
+ marks = [getattr(pytest.mark, m) for m in item.markers or []]
+ args = (item.src, item.expected, item.options)
+ if "marks" in arguments:
+ args = [*args, set(m.name for m in marks)]
+ kwargs = {"id": name, "marks": marks}
+ param = pytest.param(*args, **kwargs)
+ parameters.append(param)
+ return pytest.mark.parametrize(arguments, parameters)(fn)
+
+ return _fn
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("helloworld")
+ group.addoption(
+ "--list",
+ action="store_true",
+ dest="list-tests",
+ help="list all tests",
+ )
+
+
+def pytest_collection_finish(session):
+ if getattr(session.config.option, "list-tests"):
+ for subdir in ALL_SUBDIRS:
+ for name, item in sorted(_gather_md_tests(subdir).items()):
+ print("{} {}".format(name, item.markers))
+ pytest.exit("Done!")
diff --git a/test/php-markdown-cases/Email auto links.tags b/test/php-markdown-cases/Email auto links.tags
new file mode 100644
index 00000000..9dc50f35
--- /dev/null
+++ b/test/php-markdown-cases/Email auto links.tags
@@ -0,0 +1 @@
+htmlentities unicode
\ No newline at end of file
diff --git a/test/php-markdown-extra-cases/Abbr.tags b/test/php-markdown-extra-cases/Abbr.tags
index 0019958f..0723c8bc 100644
--- a/test/php-markdown-extra-cases/Abbr.tags
+++ b/test/php-markdown-extra-cases/Abbr.tags
@@ -1 +1 @@
-knownfailure
+knownfailure unicode
diff --git a/test/test.py b/test/test.py
deleted file mode 100755
index c69df78f..00000000
--- a/test/test.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2007-2008 ActiveState Software Inc.
-# License: MIT (http://www.opensource.org/licenses/mit-license.php)
-
-"""The markdown2 test suite entry point."""
-
-import os
-from os.path import exists, join, abspath, dirname, normpath
-import sys
-import logging
-
-import testlib
-
-log = logging.getLogger("test")
-testdir_from_ns = {
- None: os.curdir,
-}
-
-def setup():
- top_dir = dirname(dirname(abspath(__file__)))
- lib_dir = join(top_dir, "lib")
- sys.path.insert(0, lib_dir)
-
- # Attempt to get 'pygments' on the import path.
- try:
- # If already have it, use that one.
- import pygments
- except ImportError:
- pygments_dir = join(top_dir, "deps", "pygments")
- if sys.version_info[0] <= 2:
- sys.path.insert(0, pygments_dir)
- else:
- sys.path.insert(0, pygments_dir + "3")
-
-if __name__ == "__main__":
- logging.basicConfig()
-
- setup()
- default_tags = []
- try:
- import pygments
- except ImportError:
- log.warning("skipping pygments tests ('pygments' module not found)")
- default_tags.append("-pygments")
-
- retval = testlib.harness(testdir_from_ns=testdir_from_ns,
- default_tags=default_tags)
- sys.exit(retval)
diff --git a/test/test_api.py b/test/test_api.py
new file mode 100644
index 00000000..a71152ed
--- /dev/null
+++ b/test/test_api.py
@@ -0,0 +1,19 @@
+# port of api.doctests
+import markdown2
+
+
+def test_version():
+ "check if the markdown2 package has version information"
+ assert hasattr(markdown2, "__version__")
+ assert hasattr(markdown2, "__version_info__")
+
+
+def test_markdown():
+ "various ways to call the api"
+ assert markdown2.markdown("*boo*") == "boo
\n"
+
+ m = markdown2.Markdown()
+ assert m.convert("*boo*") == "boo
\n"
+
+ m = markdown2.MarkdownWithExtras()
+ assert m.convert("*boo*") == "boo
\n"
diff --git a/test/test_markdown2.py b/test/test_markdown2.py
deleted file mode 100755
index 363934f8..00000000
--- a/test/test_markdown2.py
+++ /dev/null
@@ -1,597 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2007-2008 ActiveState Software Inc.
-# License: MIT (http://www.opensource.org/licenses/mit-license.php)
-
-"""Test the Python markdown2.py."""
-
-import os
-import sys
-from os.path import join, dirname, abspath, exists, splitext, basename
-import re
-from glob import glob
-from pprint import pprint
-import unittest
-import codecs
-import difflib
-import doctest
-from json import loads as json_loads
-
-from testlib import TestError, TestSkipped, tag
-
-sys.path.insert(0, join(dirname(dirname(abspath(__file__)))))
-try:
- import markdown2
-finally:
- del sys.path[0]
-
-
-
-#---- Python version compat
-
-# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
-if sys.version_info[0] <= 2:
- py3 = False
- try:
- bytes
- except NameError:
- bytes = str
- base_string_type = basestring
-elif sys.version_info[0] >= 3:
- py3 = True
- unicode = str
- base_string_type = str
- unichr = chr
-
-
-
-#---- Test cases
-
-class _MarkdownTestCase(unittest.TestCase):
- """Helper class for Markdown tests."""
-
- maxDiff = None
-
- def _assertMarkdownParity(self, text):
- """Assert that markdown2.py produces same output as Markdown.pl."""
- #TODO add normalization
- python_html = markdown2.markdown(text)
- perl_html = _markdown_with_perl(text)
-
- close_though = ""
- if python_html != perl_html \
- and (python_html.replace('\n', '')
- == perl_html.replace('\n', '')):
- close_though = " (close though -- all but EOLs match)"
-
- self.assertEqual(python_html, perl_html, _dedent("""\
- markdown2.py didn't produce the same output as Markdown.pl%s:
- ---- text ----
- %s ---- Python markdown2.py HTML ----
- %s ---- Perl Markdown.pl HTML ----
- %s""") % (close_though, _display(text),
- _display(python_html), _display(perl_html)))
-
- def _assertMarkdownPath(self, text_path, encoding="utf-8", opts=None,
- toc_html_path=None, metadata_path=None):
- text = codecs.open(text_path, 'r', encoding=encoding).read()
- html_path = splitext(text_path)[0] + ".html"
- html = codecs.open(html_path, 'r', encoding=encoding).read()
- extra = {}
- if toc_html_path:
- extra["toc_html"] = codecs.open(toc_html_path, 'r', encoding=encoding).read()
- extra["toc_html_path"] = toc_html_path
- if metadata_path:
- extra["metadata"] = json_loads(
- codecs.open(metadata_path, 'r', encoding=encoding).read())
- extra["metadata_path"] = metadata_path
- self._assertMarkdown(text, html, text_path, html_path, opts=opts, **extra)
-
- def _assertMarkdown(self, text, html, text_path=None, html_path=None,
- opts=None, toc_html=None, toc_html_path=None, metadata=None,
- metadata_path=None):
- """Assert that markdown2.py produces the expected HTML."""
- if text_path is None: text_path = ""
- if html_path is None: html_path = ""
- if opts is None:
- opts = {}
-
- norm_html = norm_html_from_html(html)
- python_html = markdown2.markdown(text, **opts)
- python_norm_html = norm_html_from_html(python_html)
-
- close_though = ""
- if python_norm_html != norm_html \
- and (python_norm_html.replace('\n', '')
- == norm_html.replace('\n', '')):
- close_though = " (close though -- all but EOLs match)"
-
- diff = ''
- if python_norm_html != norm_html:
- diff = difflib.unified_diff(
- norm_html.splitlines(1),
- python_norm_html.splitlines(1),
- html_path,
- "markdown2 "+text_path)
- diff = ''.join(diff)
- errmsg = _dedent("""\
- markdown2.py didn't produce the expected HTML%s:
- ---- text (escaping: .=space, \\n=newline) ----
- %s ---- Python markdown2.py HTML (escaping: .=space, \\n=newline) ----
- %s ---- expected HTML (escaping: .=space, \\n=newline) ----
- %s ---- diff ----
- %s""") % (close_though, _display(text),
- _display(python_html), _display(html),
- _indent(diff))
-
- def charreprreplace(exc):
- if not isinstance(exc, UnicodeEncodeError):
- raise TypeError("don't know how to handle %r" % exc)
- if py3:
- obj_repr = repr(exc.object[exc.start:exc.end])[1:-1]
- else:
- # repr -> remote "u'" and "'"
- obj_repr = repr(exc.object[exc.start:exc.end])[2:-1]
- return (unicode(obj_repr), exc.end)
- codecs.register_error("charreprreplace", charreprreplace)
-
- self.assertEqual(python_norm_html, norm_html, errmsg)
-
- if toc_html:
- python_toc_html = python_html.toc_html
- python_norm_toc_html = norm_html_from_html(python_toc_html)
- norm_toc_html = norm_html_from_html(toc_html)
-
- diff = ''
- if python_norm_toc_html != norm_toc_html:
- diff = difflib.unified_diff(
- norm_toc_html.splitlines(1),
- python_norm_toc_html.splitlines(1),
- toc_html_path,
- "`markdown2 %s`.toc_html" % text_path)
- diff = ''.join(diff)
- errmsg = _dedent("""\
- markdown2.py didn't produce the expected TOC HTML%s:
- ---- text (escaping: .=space, \\n=newline) ----
- %s ---- Python markdown2.py TOC HTML (escaping: .=space, \\n=newline) ----
- %s ---- expected TOC HTML (escaping: .=space, \\n=newline) ----
- %s ---- diff ----
- %s""") % (close_though, _display(text),
- _display(python_toc_html), _display(toc_html),
- _indent(diff))
- self.assertEqual(python_norm_toc_html, norm_toc_html,
- errmsg.encode('utf-8', 'charreprreplace'))
-
- if metadata:
- self.assertEqual(python_html.metadata, metadata)
-
- def generate_tests(cls):
- """Add test methods to this class for each test file in
- `cls.cases_dir'.
- """
- cases_pat = join(dirname(__file__), cls.cases_dir, "*.text")
- for text_path in glob(cases_pat):
- # Load an options (`*.opts` file, if any).
- # It must be a Python dictionary. It will be passed as
- # kwargs to the markdown function.
- opts = {}
- opts_path = splitext(text_path)[0] + ".opts"
- if exists(opts_path):
- try:
- opts = eval(open(opts_path, 'r').read())
- except Exception:
- _, ex, _ = sys.exc_info()
- print("WARNING: couldn't load `%s' opts file: %s" \
- % (opts_path, ex))
-
- toc_html_path = splitext(text_path)[0] + ".toc_html"
- if not exists(toc_html_path):
- toc_html_path = None
- metadata_path = splitext(text_path)[0] + ".metadata"
- if not exists(metadata_path):
- metadata_path = None
-
- test_func = lambda self, t=text_path, o=opts, c=toc_html_path, \
- m=metadata_path: \
- self._assertMarkdownPath(t, opts=o, toc_html_path=c,
- metadata_path=m)
-
- tags_path = splitext(text_path)[0] + ".tags"
- if exists(tags_path):
- tags = []
- for line in open(tags_path):
- if '#' in line: # allow comments in .tags files
- line = line[:line.index('#')]
- tags += line.split()
- test_func.tags = tags
-
- name = splitext(basename(text_path))[0]
- name = name.replace(' - ', '_')
- name = name.replace(' ', '_')
- name = re.sub("[(),]", "", name)
- test_name = "test_%s" % name
- setattr(cls, test_name, test_func)
- generate_tests = classmethod(generate_tests)
-
-class TMTestCase(_MarkdownTestCase):
- cases_dir = "tm-cases"
-
-class MarkdownTestTestCase(_MarkdownTestCase):
- """Test cases from MarkdownTest-1.0."""
- cases_dir = "markdowntest-cases"
-
-class PHPMarkdownTestCase(_MarkdownTestCase):
- """Test cases from MDTest."""
- cases_dir = "php-markdown-cases"
-
-class PHPMarkdownExtraTestCase(_MarkdownTestCase):
- """Test cases from MDTest.
-
- These are all knownfailures because these test non-standard Markdown
- syntax no implemented in markdown2.py. See
- for details.
- """
- cases_dir = "php-markdown-extra-cases"
-
-
-class DirectTestCase(_MarkdownTestCase):
- """These are specific test that I found were broken in
- Python-markdown (markdown.py).
- """
-
- def test_slow_hr(self):
- import time
- text = """\
-* * *
-
-This on *almost* looks like an hr, except for the trailing '+'. In older
-versions of markdown2.py this was pathologically slow:
-
-- - - - - - - - - - - - - - - - - - - - - - - - - +
-"""
- html = """\
-
-
-This on almost looks like an hr, except for the trailing '+'. In older
-versions of markdown2.py this was pathologically slow:
-
-- - - - - - - - - - - - - - - - - - - - - - - - - +
-"""
- start = time.time()
- self._assertMarkdown(text, html)
- end = time.time()
- delta = end - start
- self.assertTrue(delta < 1.0, "It took more than 1s to process "
- "'slow-hr'. It took %.2fs. Too slow!" % delta)
- test_slow_hr.tags = ["perf"]
-
- def test_code_in_strong(self):
- self._assertMarkdown(
- '**look at `this code` call**',
- 'look at this code
call
\n')
- test_code_in_strong.tags = ["code", "strong"]
-
- def test_starter_pre(self):
- self._assertMarkdown(
- _indent('#!/usr/bin/python\nprint "hi"'),
- '#!/usr/bin/python\nprint "hi"\n
\n')
- test_starter_pre.tags = ["pre", "recipes"]
-
- def test_pre(self):
- self._assertMarkdown(_dedent('''\
- some starter text
-
- #!/usr/bin/python
- print "hi"'''),
- 'some starter text
\n\n#!/usr/bin/python\nprint "hi"\n
\n')
-
- def test_russian(self):
- ko = '\u043b\u0449' # 'ko' on russian keyboard
- self._assertMarkdown("## %s" % ko,
- '%s
\n' % ko)
- test_russian.tags = ["unicode", "issue3"]
-
- def test_toc_with_persistent_object(self):
- """
- Tests that the toc is the same every time it's run on HTML, even if the Markdown object isn't disposed of.
- """
- md = markdown2.Markdown(extras=["toc"])
- html = """
-# Header 1
-## Header 1.1
-## Header 1.2
-### Header 1.3
-# Header 2
-## Header 2.1
- """
- expected_toc_html = """
-"""
- self.assertEqual(expected_toc_html, md.convert(html).toc_html)
- # Do it again, to check if the toc_html is just appended rather than replaced
- self.assertEqual(expected_toc_html, md.convert(html).toc_html)
- # Create different html, and confirm toc_html is replaced
- html = """
-# I'm new html
-## I don't have to be long, just different
-"""
- expected_toc_html = """
-"""
- self.assertEqual(expected_toc_html, md.convert(html).toc_html)
- test_toc_with_persistent_object.tags = ["toc", "issue208"]
-
-
-class DocTestsTestCase(unittest.TestCase):
- def test_api(self):
- test = doctest.DocFileTest("api.doctests")
- test.runTest()
-
- # Don't bother on Python 3 because (a) there aren't many inline doctests,
- # and (b) they are more to be didactic than comprehensive test suites.
- if not py3:
- def test_internal(self):
- doctest.testmod(markdown2)
-
-
-
-#---- internal support stuff
-
-_xml_escape_re = re.compile(r'(x[0-9A-Fa-f]{2,3}|[0-9]{2,3});')
-def _xml_escape_sub(match):
- escape = match.group(1)
- if escape[0] == 'x':
- return unichr(int('0'+escape, base=16))
- else:
- return unichr(int(escape))
-
-_markdown_email_link_re = re.compile(r'(.*?)', re.U)
-def _markdown_email_link_sub(match):
- href, text = match.groups()
- href = _xml_escape_re.sub(_xml_escape_sub, href)
- text = _xml_escape_re.sub(_xml_escape_sub, text)
- return '%s' % (href, text)
-
-def norm_html_from_html(html):
- """Normalize (somewhat) Markdown'd HTML.
-
- Part of Markdown'ing involves obfuscating email links with
- randomize encoding. Undo that obfuscation.
-
- Also normalize EOLs.
- """
- if not isinstance(html, unicode):
- html = html.decode('utf-8')
- html = _markdown_email_link_re.sub(
- _markdown_email_link_sub, html)
- if sys.platform == "win32":
- html = html.replace('\r\n', '\n')
- return html
-
-
-def _display(s):
- """Markup the given string for useful display."""
- if not isinstance(s, unicode):
- s = s.decode("utf-8")
- s = _indent(_escaped_text_from_text(s, "whitespace"), 4)
- if not s.endswith('\n'):
- s += '\n'
- return s
-
-def _markdown_with_perl(text):
- markdown_pl = join(dirname(__file__), "Markdown.pl")
- if not exists(markdown_pl):
- raise OSError("`%s' does not exist: get it from "
- "http://daringfireball.net/projects/markdown/"
- % markdown_pl)
-
- i, o = os.popen2("perl %s" % markdown_pl)
- i.write(text)
- i.close()
- html = o.read()
- o.close()
- return html
-
-
-# Recipe: dedent (0.1.2)
-def _dedentlines(lines, tabsize=8, skip_first_line=False):
- """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
-
- "lines" is a list of lines to dedent.
- "tabsize" is the tab width to use for indent width calculations.
- "skip_first_line" is a boolean indicating if the first line should
- be skipped for calculating the indent width and for dedenting.
- This is sometimes useful for docstrings and similar.
-
- Same as dedent() except operates on a sequence of lines. Note: the
- lines list is modified **in-place**.
- """
- DEBUG = False
- if DEBUG:
- print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
- % (tabsize, skip_first_line))
- indents = []
- margin = None
- for i, line in enumerate(lines):
- if i == 0 and skip_first_line: continue
- indent = 0
- for ch in line:
- if ch == ' ':
- indent += 1
- elif ch == '\t':
- indent += tabsize - (indent % tabsize)
- elif ch in '\r\n':
- continue # skip all-whitespace lines
- else:
- break
- else:
- continue # skip all-whitespace lines
- if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
- if margin is None:
- margin = indent
- else:
- margin = min(margin, indent)
- if DEBUG: print("dedent: margin=%r" % margin)
-
- if margin is not None and margin > 0:
- for i, line in enumerate(lines):
- if i == 0 and skip_first_line: continue
- removed = 0
- for j, ch in enumerate(line):
- if ch == ' ':
- removed += 1
- elif ch == '\t':
- removed += tabsize - (removed % tabsize)
- elif ch in '\r\n':
- if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
- lines[i] = lines[i][j:]
- break
- else:
- raise ValueError("unexpected non-whitespace char %r in "
- "line %r while removing %d-space margin"
- % (ch, line, margin))
- if DEBUG:
- print("dedent: %r: %r -> removed %d/%d"\
- % (line, ch, removed, margin))
- if removed == margin:
- lines[i] = lines[i][j+1:]
- break
- elif removed > margin:
- lines[i] = ' '*(removed-margin) + lines[i][j+1:]
- break
- else:
- if removed:
- lines[i] = lines[i][removed:]
- return lines
-
-def _dedent(text, tabsize=8, skip_first_line=False):
- """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
-
- "text" is the text to dedent.
- "tabsize" is the tab width to use for indent width calculations.
- "skip_first_line" is a boolean indicating if the first line should
- be skipped for calculating the indent width and for dedenting.
- This is sometimes useful for docstrings and similar.
-
- textwrap.dedent(s), but don't expand tabs to spaces
- """
- lines = text.splitlines(1)
- _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
- return ''.join(lines)
-
-# Recipe: indent (0.2.1)
-def _indent(s, width=4, skip_first_line=False):
- """_indent(s, [width=4]) -> 's' indented by 'width' spaces
-
- The optional "skip_first_line" argument is a boolean (default False)
- indicating if the first line should NOT be indented.
- """
- lines = s.splitlines(1)
- indentstr = ' '*width
- if skip_first_line:
- return indentstr.join(lines)
- else:
- return indentstr + indentstr.join(lines)
-
-
-# Recipe: text_escape (0.1)
-def _escaped_text_from_text(text, escapes="eol"):
- r"""Return escaped version of text.
-
- "escapes" is either a mapping of chars in the source text to
- replacement text for each such char or one of a set of
- strings identifying a particular escape style:
- eol
- replace EOL chars with '\r' and '\n', maintain the actual
- EOLs though too
- whitespace
- replace EOL chars as above, tabs with '\t' and spaces
- with periods ('.')
- eol-one-line
- replace EOL chars with '\r' and '\n'
- whitespace-one-line
- replace EOL chars as above, tabs with '\t' and spaces
- with periods ('.')
- """
- #TODO:
- # - Add 'c-string' style.
- # - Add _escaped_html_from_text() with a similar call sig.
- import re
-
- if isinstance(escapes, base_string_type):
- if escapes == "eol":
- escapes = {'\r\n': "\\r\\n\r\n", '\n': "\\n\n", '\r': "\\r\r"}
- elif escapes == "whitespace":
- escapes = {'\r\n': "\\r\\n\r\n", '\n': "\\n\n", '\r': "\\r\r",
- '\t': "\\t", ' ': "."}
- elif escapes == "eol-one-line":
- escapes = {'\n': "\\n", '\r': "\\r"}
- elif escapes == "whitespace-one-line":
- escapes = {'\n': "\\n", '\r': "\\r", '\t': "\\t", ' ': '.'}
- else:
- raise ValueError("unknown text escape style: %r" % escapes)
-
- # Sort longer replacements first to allow, e.g. '\r\n' to beat '\r' and
- # '\n'.
- escapes_keys = list(escapes.keys())
- escapes_keys.sort(key=lambda a: len(a), reverse=True)
- def repl(match):
- val = escapes[match.group(0)]
- return val
- escaped = re.sub("(%s)" % '|'.join([re.escape(k) for k in escapes_keys]),
- repl,
- text)
-
- return escaped
-
-def _one_line_summary_from_text(text, length=78,
- escapes={'\n':"\\n", '\r':"\\r", '\t':"\\t"}):
- r"""Summarize the given text with one line of the given length.
-
- "text" is the text to summarize
- "length" (default 78) is the max length for the summary
- "escapes" is a mapping of chars in the source text to
- replacement text for each such char. By default '\r', '\n'
- and '\t' are escaped with their '\'-escaped repr.
- """
- if len(text) > length:
- head = text[:length-3]
- else:
- head = text
- escaped = _escaped_text_from_text(head, escapes)
- if len(text) > length:
- summary = escaped[:length-3] + "..."
- else:
- summary = escaped
- return summary
-
-
-#---- hook for testlib
-
-def test_cases():
- """This is called by test.py to build up the test cases."""
- TMTestCase.generate_tests()
- yield TMTestCase
- MarkdownTestTestCase.generate_tests()
- yield MarkdownTestTestCase
- PHPMarkdownTestCase.generate_tests()
- yield PHPMarkdownTestCase
- PHPMarkdownExtraTestCase.generate_tests()
- yield PHPMarkdownExtraTestCase
- yield DirectTestCase
- yield DocTestsTestCase
diff --git a/test/test_rendering.py b/test/test_rendering.py
new file mode 100644
index 00000000..9bd4ba71
--- /dev/null
+++ b/test/test_rendering.py
@@ -0,0 +1,42 @@
+import conftest
+import html
+import markdown2
+
+def read_text(path, marks):
+ from html import unescape
+ encoding = "utf-8" if "unicode" in marks else None
+ txt = path.read_text(encoding=encoding)
+ if "htmlentities" in marks:
+ return unescape(txt)
+ return txt
+
+
+def read_and_process(path, options, marks):
+ encoding = "utf-8" if "unicode" in marks else None
+ txt = markdown2.markdown(path.read_text(encoding=encoding), **(options or {}))
+ if "htmlentities" in marks:
+ return html.unescape(txt)
+ return txt
+
+
+@conftest.parametrize("source, expected, options, marks", "tm-cases")
+def test_tm_cases(source, expected, options, marks):
+ found = read_and_process(source, options, marks)
+ assert found == read_text(expected, marks)
+
+
+@conftest.parametrize("source, expected, options", "markdowntest-cases")
+def test_markdowntest_cases(source, expected, options):
+ found = markdown2.markdown(source.read_text(), **(options or {}))
+ assert found == expected.read_text()
+
+
+@conftest.parametrize("source, expected, options, marks", "php-markdown-cases")
+def test_php_markdown_cases(source, expected, options, marks):
+ found = read_and_process(source, options, marks)
+ assert found == read_text(expected, marks)
+
+@conftest.parametrize("source, expected, options", "php-markdown-extra-cases")
+def test_php_markdown_extra_cases(source, expected, options):
+ found = markdown2.markdown(source.read_text(), **(options or {}))
+ assert found == expected.read_text()
diff --git a/test/testlib.py b/test/testlib.py
deleted file mode 100644
index 06015404..00000000
--- a/test/testlib.py
+++ /dev/null
@@ -1,715 +0,0 @@
-#!python
-# Copyright (c) 2000-2008 ActiveState Software Inc.
-# License: MIT License (http://www.opensource.org/licenses/mit-license.php)
-
-"""
- test suite harness
-
- Usage:
-
- test --list [...] # list available tests modules
- test [...] # run test modules
-
- Options:
- -v, --verbose more verbose output
- -q, --quiet don't print anything except if a test fails
- -d, --debug log debug information
- -h, --help print this text and exit
- -l, --list Just list the available test modules. You can also
- specify tags to play with module filtering.
- -n, --no-default-tags Ignore default tags
- -L Specify a logging level via
- :
- For example:
- codeintel.db:DEBUG
- This option can be used multiple times.
-
- By default this will run all tests in all available "test_*" modules.
- Tags can be specified to control which tests are run. For example:
-
- test python # run tests with the 'python' tag
- test python cpln # run tests with both 'python' and 'cpln' tags
- test -- -python # exclude tests with the 'python' tag
- # (the '--' is necessary to end the option list)
-
- The full name and base name of a test module are implicit tags for that
- module, e.g. module "test_xdebug.py" has tags "test_xdebug" and "xdebug".
- A TestCase's class name (with and without "TestCase") is an implicit
- tag for an test_* methods. A "test_foo" method also has "test_foo"
- and "foo" implicit tags.
-
- Tags can be added explicitly added:
- - to modules via a __tags__ global list; and
- - to individual test_* methods via a "tags" attribute list (you can
- use the testlib.tag() decorator for this).
-"""
-#TODO:
-# - Document how tests are found (note the special "test_cases()" and
-# "test_suite_class" hooks).
-# - See the optparse "TODO" below.
-# - Make the quiet option actually quiet.
-
-__version_info__ = (0, 6, 6)
-__version__ = '.'.join(map(str, __version_info__))
-
-
-import os
-from os.path import join, basename, dirname, abspath, splitext, \
- isfile, isdir, normpath, exists
-import sys
-import getopt
-import glob
-import time
-import types
-import tempfile
-import unittest
-from pprint import pprint
-import imp
-import optparse
-import logging
-import textwrap
-import traceback
-
-
-
-#---- globals and exceptions
-
-log = logging.getLogger("test")
-
-
-
-#---- exports generally useful to test cases
-
-class TestError(Exception):
- pass
-
-class TestSkipped(Exception):
- """Raise this to indicate that a test is being skipped.
-
- ConsoleTestRunner knows to interpret these at NOT failures.
- """
- pass
-
-class TestFailed(Exception):
- pass
-
-def tag(*tags):
- """Decorator to add tags to test_* functions.
-
- Example:
- class MyTestCase(unittest.TestCase):
- @testlib.tag("knownfailure")
- def test_foo(self):
- #...
- """
- def decorate(f):
- if not hasattr(f, "tags"):
- f.tags = []
- f.tags += tags
- return f
- return decorate
-
-
-#---- timedtest decorator
-# Use this to assert that a test completes in a given amount of time.
-# This is from http://www.artima.com/forums/flat.jsp?forum=122&thread=129497
-# Including here, because it might be useful.
-# NOTE: Untested and I suspect some breakage.
-
-TOLERANCE = 0.05
-
-class DurationError(AssertionError): pass
-
-def timedtest(max_time, tolerance=TOLERANCE):
- """ timedtest decorator
- decorates the test method with a timer
- when the time spent by the test exceeds
- max_time in seconds, an Assertion error is thrown.
- """
- def _timedtest(function):
- def wrapper(*args, **kw):
- start_time = time.time()
- try:
- function(*args, **kw)
- finally:
- total_time = time.time() - start_time
- if total_time > max_time + tolerance:
- raise DurationError(('Test was too long (%.2f s)'
- % total_time))
- return wrapper
-
- return _timedtest
-
-
-
-#---- module api
-
-class Test(object):
- def __init__(self, ns, testmod, testcase, testfn_name,
- testsuite_class=None):
- self.ns = ns
- self.testmod = testmod
- self.testcase = testcase
- self.testfn_name = testfn_name
- self.testsuite_class = testsuite_class
- # Give each testcase some extra testlib attributes for useful
- # introspection on TestCase instances later on.
- self.testcase._testlib_shortname_ = self.shortname()
- self.testcase._testlib_explicit_tags_ = self.explicit_tags()
- self.testcase._testlib_implicit_tags_ = self.implicit_tags()
- def __str__(self):
- return self.shortname()
- def __repr__(self):
- return "" % self.shortname()
- def shortname(self):
- bits = [self._normname(self.testmod.__name__),
- self._normname(self.testcase.__class__.__name__),
- self._normname(self.testfn_name)]
- if self.ns:
- bits.insert(0, self.ns)
- return '/'.join(bits)
- def _flatten_tags(self, tags):
- """Split tags with '/' in them into multiple tags.
-
- '/' is the reserved tag separator and allowing tags with
- embedded '/' results in one being unable to select those via
- filtering. As long as tag order is stable then presentation of
- these subsplit tags should be fine.
- """
- flattened = []
- for t in tags:
- flattened += t.split('/')
- return flattened
- def explicit_tags(self):
- tags = []
- if hasattr(self.testmod, "__tags__"):
- tags += self.testmod.__tags__
- if hasattr(self.testcase, "__tags__"):
- tags += self.testcase.__tags__
- testfn = getattr(self.testcase, self.testfn_name)
- if hasattr(testfn, "tags"):
- tags += testfn.tags
- return self._flatten_tags(tags)
- def implicit_tags(self):
- tags = [
- self.testmod.__name__.lower(),
- self._normname(self.testmod.__name__),
- self.testcase.__class__.__name__.lower(),
- self._normname(self.testcase.__class__.__name__),
- self.testfn_name,
- self._normname(self.testfn_name),
- ]
- if self.ns:
- tags.insert(0, self.ns)
- return self._flatten_tags(tags)
- def tags(self):
- return self.explicit_tags() + self.implicit_tags()
- def doc(self):
- testfn = getattr(self.testcase, self.testfn_name)
- return testfn.__doc__ or ""
- def _normname(self, name):
- if name.startswith("test_"):
- return name[5:].lower()
- elif name.startswith("test"):
- return name[4:].lower()
- elif name.endswith("TestCase"):
- return name[:-8].lower()
- else:
- return name
-
-
-def testmod_paths_from_testdir(testdir):
- """Generate test module paths in the given dir."""
- for path in glob.glob(join(testdir, "test_*.py")):
- yield path
-
- for path in glob.glob(join(testdir, "test_*")):
- if not isdir(path): continue
- if not isfile(join(path, "__init__.py")): continue
- yield path
-
-def testmods_from_testdir(testdir):
- """Generate test modules in the given test dir.
-
- Modules are imported with 'testdir' first on sys.path.
- """
- testdir = normpath(testdir)
- for testmod_path in testmod_paths_from_testdir(testdir):
- testmod_name = splitext(basename(testmod_path))[0]
- log.debug("import test module '%s'", testmod_path)
- try:
- iinfo = imp.find_module(testmod_name, [dirname(testmod_path)])
- testabsdir = abspath(testdir)
- sys.path.insert(0, testabsdir)
- old_dir = os.getcwd()
- os.chdir(testdir)
- try:
- testmod = imp.load_module(testmod_name, *iinfo)
- finally:
- os.chdir(old_dir)
- sys.path.remove(testabsdir)
- except TestSkipped:
- _, ex, _ = sys.exc_info()
- log.warning("'%s' module skipped: %s", testmod_name, ex)
- except Exception:
- _, ex, _ = sys.exc_info()
- log.warning("could not import test module '%s': %s (skipping, "
- "run with '-d' for full traceback)",
- testmod_path, ex)
- if log.isEnabledFor(logging.DEBUG):
- traceback.print_exc()
- else:
- yield testmod
-
-def testcases_from_testmod(testmod):
- """Gather tests from a 'test_*' module.
-
- Returns a list of TestCase-subclass instances. One instance for each
- found test function.
-
- In general the normal unittest TestLoader.loadTests*() semantics are
- used for loading tests with some differences:
- - TestCase subclasses beginning with '_' are skipped (presumed to be
- internal).
- - If the module has a top-level "test_cases", it is called for a list of
- TestCase subclasses from which to load tests (can be a generator). This
- allows for run-time setup of test cases.
- - If the module has a top-level "test_suite_class", it is used to group
- all test cases from that module into an instance of that TestSuite
- subclass. This allows for overriding of test running behaviour.
- """
- class TestListLoader(unittest.TestLoader):
- suiteClass = list
-
- loader = TestListLoader()
- if hasattr(testmod, "test_cases"):
- try:
- for testcase_class in testmod.test_cases():
- if testcase_class.__name__.startswith("_"):
- log.debug("skip private TestCase class '%s'",
- testcase_class.__name__)
- continue
- for testcase in loader.loadTestsFromTestCase(testcase_class):
- yield testcase
- except Exception:
- _, ex, _ = sys.exc_info()
- testmod_path = testmod.__file__
- if testmod_path.endswith(".pyc"):
- testmod_path = testmod_path[:-1]
- log.warning("error running test_cases() in '%s': %s (skipping, "
- "run with '-d' for full traceback)",
- testmod_path, ex)
- if log.isEnabledFor(logging.DEBUG):
- traceback.print_exc()
- else:
- class_names_skipped = []
- for testcases in loader.loadTestsFromModule(testmod):
- for testcase in testcases:
- class_name = testcase.__class__.__name__
- if class_name in class_names_skipped:
- pass
- elif class_name.startswith("_"):
- log.debug("skip private TestCase class '%s'", class_name)
- class_names_skipped.append(class_name)
- else:
- yield testcase
-
-
-def tests_from_manifest(testdir_from_ns):
- """Return a list of `testlib.Test` instances for each test found in
- the manifest.
-
- There will be a test for
- (a) each "test*" function of
- (b) each TestCase-subclass in
- (c) each "test_*" Python module in
- (d) each test dir in the manifest.
-
- If a "test_*" module has a top-level "test_suite_class", it will later
- be used to group all test cases from that module into an instance of that
- TestSuite subclass. This allows for overriding of test running behaviour.
- """
- for ns, testdir in testdir_from_ns.items():
- for testmod in testmods_from_testdir(testdir):
- if hasattr(testmod, "test_suite_class"):
- testsuite_class = testmod.test_suite_class
- if not issubclass(testsuite_class, unittest.TestSuite):
- testmod_path = testmod.__file__
- if testmod_path.endswith(".pyc"):
- testmod_path = testmod_path[:-1]
- log.warning("'test_suite_class' of '%s' module is not a "
- "subclass of 'unittest.TestSuite': ignoring",
- testmod_path)
- else:
- testsuite_class = None
- for testcase in testcases_from_testmod(testmod):
- yield Test(ns, testmod, testcase,
- testcase._testMethodName,
- testsuite_class)
-
-def tests_from_manifest_and_tags(testdir_from_ns, tags):
- include_tags = [tag.lower() for tag in tags if not tag.startswith('-')]
- exclude_tags = [tag[1:].lower() for tag in tags if tag.startswith('-')]
-
- for test in tests_from_manifest(testdir_from_ns):
- test_tags = [t.lower() for t in test.tags()]
-
- matching_exclude_tags = [t for t in exclude_tags if t in test_tags]
- if matching_exclude_tags:
- #log.debug("test '%s' matches exclude tag(s) '%s': skipping",
- # test.shortname(), "', '".join(matching_exclude_tags))
- continue
-
- if not include_tags:
- yield test
- else:
- for tag in include_tags:
- if tag not in test_tags:
- #log.debug("test '%s' does not match tag '%s': skipping",
- # test.shortname(), tag)
- break
- else:
- #log.debug("test '%s' matches tags: %s", test.shortname(),
- # ' '.join(tags))
- yield test
-
-def test(testdir_from_ns, tags=[], setup_func=None):
- log.debug("test(testdir_from_ns=%r, tags=%r, ...)",
- testdir_from_ns, tags)
- if setup_func is not None:
- setup_func()
- tests = list(tests_from_manifest_and_tags(testdir_from_ns, tags))
- if not tests:
- return None
-
- # Groups test cases into a test suite class given by their test module's
- # "test_suite_class" hook, if any.
- suite = unittest.TestSuite()
- suite_for_testmod = None
- testmod = None
- for test in tests:
- if test.testmod != testmod:
- if suite_for_testmod is not None:
- suite.addTest(suite_for_testmod)
- suite_for_testmod = (test.testsuite_class or unittest.TestSuite)()
- testmod = test.testmod
- suite_for_testmod.addTest(test.testcase)
- if suite_for_testmod is not None:
- suite.addTest(suite_for_testmod)
-
- runner = ConsoleTestRunner(sys.stdout)
- result = runner.run(suite)
- return result
-
-def list_tests(testdir_from_ns, tags):
- # Say I have two test_* modules:
- # test_python.py:
- # __tags__ = ["guido"]
- # class BasicTestCase(unittest.TestCase):
- # def test_def(self):
- # def test_class(self):
- # class ComplexTestCase(unittest.TestCase):
- # def test_foo(self):
- # def test_bar(self):
- # test_perl/__init__.py:
- # __tags__ = ["larry", "wall"]
- # class BasicTestCase(unittest.TestCase):
- # def test_sub(self):
- # def test_package(self):
- # class EclecticTestCase(unittest.TestCase):
- # def test_foo(self):
- # def test_bar(self):
- # The short-form list output for this should look like:
- # python/basic/def [guido]
- # python/basic/class [guido]
- # python/complex/foo [guido]
- # python/complex/bar [guido]
- # perl/basic/sub [larry, wall]
- # perl/basic/package [larry, wall]
- # perl/eclectic/foo [larry, wall]
- # perl/eclectic/bar [larry, wall]
- log.debug("list_tests(testdir_from_ns=%r, tags=%r)",
- testdir_from_ns, tags)
-
- tests = list(tests_from_manifest_and_tags(testdir_from_ns, tags))
- if not tests:
- return
-
- WIDTH = 78
- if log.isEnabledFor(logging.INFO): # long-form
- for i, t in enumerate(tests):
- if i:
- print()
- testfile = t.testmod.__file__
- if testfile.endswith(".pyc"):
- testfile = testfile[:-1]
- print("%s:" % t.shortname())
- print(" from: %s#%s.%s" % (testfile,
- t.testcase.__class__.__name__, t.testfn_name))
- wrapped = textwrap.fill(' '.join(t.tags()), WIDTH-10)
- print(" tags: %s" % _indent(wrapped, 8, True))
- if t.doc():
- print(_indent(t.doc(), width=2))
- else:
- for t in tests:
- line = t.shortname() + ' '
- if t.explicit_tags():
- line += '[%s]' % ' '.join(t.explicit_tags())
- print(line)
-
-
-#---- text test runner that can handle TestSkipped reasonably
-
-class ConsoleTestResult(unittest.TestResult):
- """A test result class that can print formatted text results to a stream.
-
- Used by ConsoleTestRunner.
- """
- separator1 = '=' * 70
- separator2 = '-' * 70
-
- def __init__(self, stream):
- unittest.TestResult.__init__(self)
- self.skips = []
- self.stream = stream
-
- def getDescription(self, test):
- if test._testlib_explicit_tags_:
- return "%s [%s]" % (test._testlib_shortname_,
- ', '.join(test._testlib_explicit_tags_))
- else:
- return test._testlib_shortname_
-
- def startTest(self, test):
- unittest.TestResult.startTest(self, test)
- self.stream.write(self.getDescription(test))
- self.stream.write(" ... ")
-
- def addSuccess(self, test):
- unittest.TestResult.addSuccess(self, test)
- self.stream.write("ok\n")
-
- def addSkip(self, test, err):
- why = str(err[1])
- self.skips.append((test, why))
- self.stream.write("skipped (%s)\n" % why)
-
- def addError(self, test, err):
- if isinstance(err[1], TestSkipped):
- self.addSkip(test, err)
- else:
- unittest.TestResult.addError(self, test, err)
- self.stream.write("ERROR\n")
-
- def addFailure(self, test, err):
- unittest.TestResult.addFailure(self, test, err)
- self.stream.write("FAIL\n")
-
- def printSummary(self):
- self.stream.write('\n')
- self.printErrorList('ERROR', self.errors)
- self.printErrorList('FAIL', self.failures)
-
- def printErrorList(self, flavour, errors):
- for test, err in errors:
- self.stream.write(self.separator1 + '\n')
- self.stream.write("%s: %s\n"
- % (flavour, self.getDescription(test)))
- self.stream.write(self.separator2 + '\n')
- self.stream.write("%s\n" % err)
-
-
-class ConsoleTestRunner(object):
- """A test runner class that displays results on the console.
-
- It prints out the names of tests as they are run, errors as they
- occur, and a summary of the results at the end of the test run.
-
- Differences with unittest.TextTestRunner:
- - adds support for *skipped* tests (those that raise TestSkipped)
- - no verbosity option (only have equiv of verbosity=2)
- - test "short desc" is it 3-level tag name (e.g. 'foo/bar/baz' where
- that identifies: 'test_foo.py::BarTestCase.test_baz'.
- """
- def __init__(self, stream=sys.stderr):
- self.stream = stream
-
- def run(self, test_or_suite, test_result_class=ConsoleTestResult):
- """Run the given test case or test suite."""
- result = test_result_class(self.stream)
- start_time = time.time()
- test_or_suite.run(result)
- time_taken = time.time() - start_time
-
- result.printSummary()
- self.stream.write(result.separator2 + '\n')
- self.stream.write("Ran %d test%s in %.3fs\n\n"
- % (result.testsRun, result.testsRun != 1 and "s" or "",
- time_taken))
- details = []
- num_skips = len(result.skips)
- if num_skips:
- details.append("%d skip%s"
- % (num_skips, (num_skips != 1 and "s" or "")))
- if not result.wasSuccessful():
- num_failures = len(result.failures)
- if num_failures:
- details.append("%d failure%s"
- % (num_failures, (num_failures != 1 and "s" or "")))
- num_errors = len(result.errors)
- if num_errors:
- details.append("%d error%s"
- % (num_errors, (num_errors != 1 and "s" or "")))
- self.stream.write("FAILED (%s)\n" % ', '.join(details))
- elif details:
- self.stream.write("OK (%s)\n" % ', '.join(details))
- else:
- self.stream.write("OK\n")
- return result
-
-
-
-#---- internal support stuff
-
-# Recipe: indent (0.2.1)
-def _indent(s, width=4, skip_first_line=False):
- """_indent(s, [width=4]) -> 's' indented by 'width' spaces
-
- The optional "skip_first_line" argument is a boolean (default False)
- indicating if the first line should NOT be indented.
- """
- lines = s.splitlines(1)
- indentstr = ' '*width
- if skip_first_line:
- return indentstr.join(lines)
- else:
- return indentstr + indentstr.join(lines)
-
-
-
-
-
-#---- mainline
-
-#TODO: pass in add_help_option=False and add it ourself here.
-## Optparse's handling of the doc passed in for -h|--help handling is
-## abysmal. Hence we'll stick with getopt.
-#def _parse_opts(args):
-# """_parse_opts(args) -> (options, tags)"""
-# usage = "usage: %prog [OPTIONS...] [TAGS...]"
-# parser = optparse.OptionParser(prog="test", usage=usage,
-# description=__doc__)
-# parser.add_option("-v", "--verbose", dest="log_level",
-# action="store_const", const=logging.DEBUG,
-# help="more verbose output")
-# parser.add_option("-q", "--quiet", dest="log_level",
-# action="store_const", const=logging.WARNING,
-# help="quieter output")
-# parser.add_option("-l", "--list", dest="action",
-# action="store_const", const="list",
-# help="list available tests")
-# parser.set_defaults(log_level=logging.INFO, action="test")
-# opts, raw_tags = parser.parse_args()
-#
-# # Trim '.py' from user-supplied tags. They might have gotten there
-# # via shell expansion.
-# ...
-#
-# return opts, raw_tags
-
-def _parse_opts(args, default_tags):
- """_parse_opts(args) -> (log_level, action, tags)"""
- opts, raw_tags = getopt.getopt(args, "hvqdlL:n",
- ["help", "verbose", "quiet", "debug", "list", "no-default-tags"])
- log_level = logging.WARN
- action = "test"
- no_default_tags = False
- for opt, optarg in opts:
- if opt in ("-h", "--help"):
- action = "help"
- elif opt in ("-v", "--verbose"):
- log_level = logging.INFO
- elif opt in ("-q", "--quiet"):
- log_level = logging.ERROR
- elif opt in ("-d", "--debug"):
- log_level = logging.DEBUG
- elif opt in ("-l", "--list"):
- action = "list"
- elif opt in ("-n", "--no-default-tags"):
- no_default_tags = True
- elif opt == "-L":
- # Optarg is of the form ':', e.g.
- # "codeintel:DEBUG", "codeintel.db:INFO".
- lname, llevelname = optarg.split(':', 1)
- llevel = getattr(logging, llevelname)
- logging.getLogger(lname).setLevel(llevel)
-
- # Clean up the given tags.
- if no_default_tags:
- tags = []
- else:
- tags = default_tags
- for raw_tag in raw_tags:
- if splitext(raw_tag)[1] in (".py", ".pyc", ".pyo", ".pyw") \
- and exists(raw_tag):
- # Trim '.py' from user-supplied tags if it looks to be from
- # shell expansion.
- tags.append(splitext(raw_tag)[0])
- elif '/' in raw_tag:
- # Split one '/' to allow the shortname from the test listing
- # to be used as a filter.
- tags += raw_tag.split('/')
- else:
- tags.append(raw_tag)
-
- return log_level, action, tags
-
-
-def harness(testdir_from_ns={None: os.curdir}, argv=sys.argv,
- setup_func=None, default_tags=None):
- """Convenience mainline for a test harness "test.py" script.
-
- "testdir_from_ns" (optional) is basically a set of directories in
- which to look for test cases. It is a dict with:
- :
- where is a (short) string that becomes part of the
- included test names and an implicit tag for filtering those
- tests. By default the current dir is use with an empty namespace:
- {None: os.curdir}
- "setup_func" (optional) is a callable that will be called once
- before any tests are run to prepare for the test suite. It
- is not called if no tests will be run.
- "default_tags" (optional)
-
- Typically, if you have a number of test_*.py modules you can create
- a test harness, "test.py", for them that looks like this:
-
- #!/usr/bin/env python
- if __name__ == "__main__":
- retval = testlib.harness()
- sys.exit(retval)
- """
- if not logging.root.handlers:
- logging.basicConfig()
- try:
- log_level, action, tags = _parse_opts(argv[1:], default_tags or [])
- except getopt.error:
- _, ex, _ = sys.exc_info()
- log.error(str(ex) + " (did you need a '--' before a '-TAG' argument?)")
- return 1
- log.setLevel(log_level)
-
- if action == "help":
- print(__doc__)
- return 0
- if action == "list":
- return list_tests(testdir_from_ns, tags)
- elif action == "test":
- result = test(testdir_from_ns, tags, setup_func=setup_func)
- if result is None:
- return None
- return len(result.errors) + len(result.failures)
- else:
- raise TestError("unexpected action/mode: '%s'" % action)
-
-
diff --git a/test/tm-cases/auto_link.tags b/test/tm-cases/auto_link.tags
new file mode 100644
index 00000000..d4f0f734
--- /dev/null
+++ b/test/tm-cases/auto_link.tags
@@ -0,0 +1 @@
+htmlentities
\ No newline at end of file
diff --git a/test/tm-cases/auto_link_email_with_underscore.tags b/test/tm-cases/auto_link_email_with_underscore.tags
index 732a565a..89751328 100644
--- a/test/tm-cases/auto_link_email_with_underscore.tags
+++ b/test/tm-cases/auto_link_email_with_underscore.tags
@@ -1 +1 @@
-issue26
+issue26 htmlentities
diff --git a/test/tm-cases/auto_link_safe_mode.tags b/test/tm-cases/auto_link_safe_mode.tags
index 8b1951ca..9ed8e44f 100644
--- a/test/tm-cases/auto_link_safe_mode.tags
+++ b/test/tm-cases/auto_link_safe_mode.tags
@@ -1 +1 @@
-issue7 safe_mode
+issue7 safe_mode htmlentities
diff --git a/test/tm-cases/toc_4.tags b/test/tm-cases/toc_4.tags
index 2b2472e0..b3347c83 100644
--- a/test/tm-cases/toc_4.tags
+++ b/test/tm-cases/toc_4.tags
@@ -1 +1 @@
-toc extra
+toc extra unicode