diff --git a/.circleci/config.yml b/.circleci/config.yml index 71c6ce06..692dbe4f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -49,7 +49,7 @@ jobs: # ### INITIALIZE AND CACHE REQUIREMENTS ### - restore_cache: keys: - - v2-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/tests.txt" }} + - v3-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/tests.txt" }}-{{ checksum "requirements/jupyter.txt" }} - run: name: install dependencies command: | @@ -63,14 +63,15 @@ jobs: - save_cache: paths: - ./venv - key: v2-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/tests.txt" }} + key: v3-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/tests.txt" }}-{{ checksum "requirements/jupyter.txt" }} # ### RUN TESTS ### - run: name: run tests command: | . venv/bin/activate - pip install pytest-cov==2.8.1 # hack to avoid regression - python run_tests.py + # pip install pytest-cov==2.8.1 # hack to avoid regression + #python run_tests.py + python -m pytest --cov=xdoctest --cov-config .coveragerc --cov-report term -s - store_artifacts: path: test-reports destination: test-reports @@ -86,7 +87,7 @@ jobs: # ### INITIALIZE AND CACHE REQUIREMENTS ### - restore_cache: keys: - - v2-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/optional.txt" }}-{{ checksum "requirements/tests.txt" }} + - v3-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/optional.txt" }}-{{ checksum "requirements/tests.txt" }}-{{ checksum "requirements/jupyter.txt" }}-{{ checksum "requirements/colors.txt" }} - run: name: install dependencies command: | @@ -100,14 +101,15 @@ jobs: - save_cache: paths: - ./venv - key: v2-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/optional.txt" }}-{{ checksum "requirements/tests.txt" }} + key: v3-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/optional.txt" }}-{{ checksum "requirements/tests.txt" }}-{{ checksum "requirements/jupyter.txt" }}-{{ checksum "requirements/colors.txt" }} # ### RUN TESTS ### - run: name: run tests command: | . 
venv/bin/activate - pip install pytest-cov==2.8.1 # hack to avoid regression - python run_tests.py + # pip install pytest-cov==2.8.1 # hack to avoid regression + #python run_tests.py + python -m pytest --cov=xdoctest --cov-config .coveragerc --cov-report term -s - store_artifacts: path: test-reports destination: test-reports @@ -227,25 +229,83 @@ jobs: - PYTHON_EXE: pypy3 working_directory: ~/repo-full-pypy3 - heredoc: - docker: - - image: pypy:3 - working_directory: ~/dev-only-not-a-real-job - steps: - - | - __heredoc__=" +.__heredoc__: &__heredoc__ + - | + IMAGE_NAME=circleci/python:3.9 + docker pull $IMAGE_NAME + + IMAGE_NAME=pypy:3 + docker pull $IMAGE_NAME + docker run -v $HOME/code/xdoctest:/io -it $IMAGE_NAME bash + + IMAGE_NAME=circleci/python:3.10-rc + docker pull $IMAGE_NAME + + docker run -v $HOME/code/xdoctest:/io -it pypy:3 bash + docker run -v $HOME/code/xdoctest:/io -it $IMAGE_NAME bash + cd /io + + # Logic to print out the commands to reproduce CI steps + source $HOME/local/init/utils.sh + pyblock " + import yaml + import ubelt as ub + data = yaml.safe_load(open(ub.expandpath('$HOME/code/xdoctest/.circleci/config.yml'))) + JOB_NAME = 'test-minimal-pypy3' + job = data['jobs'][JOB_NAME] + IMAGE_NAME = job['docker'][0]['image'] + print('IMAGE_NAME={}'.format(IMAGE_NAME)) + print('docker run -v $HOME/code/xdoctest:/io -it {} bash'.format(IMAGE_NAME)) + print(ub.codeblock( + ''' + ### + ### + # Clone the mounted repo for a fresh start + mkdir -p $HOME/code + git clone /io /root/{JOB_NAME} + cd /root/{JOB_NAME} + ''').format(JOB_NAME=JOB_NAME)) + for kv in job['environment']: + for k, v in kv.items(): + print('{}={}'.format(k, v)) + for step in job['steps']: + if 'run' in step: + print(step['run']['command']) + " + - pypy3 -m venv venv + IMAGE_NAME=pypy:3 + docker run -v /home/joncrall/code/xdoctest:/io -it pypy:3 bash + ### + ### + # Clone the mounted repo for a fresh start + mkdir -p /home/joncrall/code + git clone /io /root/test-minimal-pypy3 + cd /root/test-minimal-pypy3 + PYTHON_EXE=pypy3 + $PYTHON_EXE -m venv venv || virtualenv -v venv # first command is python3 || second is python2 . venv/bin/activate # The "minimal" tests install barebones requirements + pip install pip -U pip install -r requirements/tests.txt pip install -r requirements/runtime.txt - pip install -e . + pip install . - pip install pytest-cov==2.8.1 # hack to avoid regression - ./run_doctests.sh || echo "pypy failed, but this is allowed" - ./run_tests.sh || echo "pypy failed, but this is allowed" - " + . 
venv/bin/activate + python -m pytest --cov=xdoctest --cov-config .coveragerc --cov-report term -s + # pip install pytest-cov==2.8.1 # hack to avoid regression + #python run_tests.py + + + # TO RUN A JOB ON YOUR LOCAL MACHINE + # INSTALL CIRCLE CI + curl -fLSs https://raw.githubusercontent.com/CircleCI-Public/circleci-cli/master/install.sh | DESTDIR=$HOME/.local/bin bash + + JOB_NAME=test-minimal-pypy3 + circleci local execute --job $JOB_NAME + + JOB_NAME=test-full-pypy3 + circleci local execute --job $JOB_NAME diff --git a/.travis.yml b/.travis.yml index 196197f7..0b18baa4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,7 +24,7 @@ python: - "3.4" - "3.5" - "3.6" - - "nightly" + #- "nightly" # remove nightly for now before_install: - pip install pip -U diff --git a/CHANGELOG.md b/CHANGELOG.md index bfd52204..cbc3dc6e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,11 +5,29 @@ We are currently working on porting this changelog to the specifications in This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## Version 0.14.1 - Unreleased +## Version 0.15.0 - Unreleased + + +### Added +* `pip install xdoctest` can now specify `[colors]` or `[jupyter]` +* Enhanced REQUIRES directive behavior: multiple comma-separated requirements + can now be listed in one directive. +* Xdoctest can now be run inside of Jupyter notebooks / IPython sessions +* Xdoctest can now be run on Jupyter notebooks (Note that in general it is + better practice to write a module) ### Fixed * Bug in `doctest_callable` where it would not populate globals from the function context. +### Changed +* Renamed `Config` to `DoctestConfig` +* Renamed `static_analysis.parse_calldefs` to `static_analysis.parse_static_calldefs`. + A temporary function with the old name is exposed for backwards compatibility. +* Changed argument name from `modpath_or_name` to `module_identifier` in several functions. + This is to better indicate its coercible nature as either a module path or a + module name. This change impacts `doctest_module`, `parse_doctestables`, + `package_calldefs`. + ## [Version 0.14.0] - Released 2020-08-26 diff --git a/appveyor.yml b/appveyor.yml index 4d98f7fe..19674897 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -66,7 +66,7 @@ test_script: - set PYTHONIOENCODING=utf-8 - "%PYTHON%\\python.exe -m pip install pytest-cov==2.8.1" - "%PYTHON%\\python.exe -m xdoctest xdoctest" - - "%PYTHON%\\python.exe -m pytest" + - "%PYTHON%\\python.exe -m pytest -s --xdoctest-verbose=3" after_test: # This step builds your wheels. @@ -77,7 +77,7 @@ artifacts: # bdist_wheel puts your built wheel in the dist directory - - path: dist\* + #- path: dist\* #on_success: # You can use this step to upload your artifacts to a public website.
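For context, a minimal sketch of the enhanced comma-separated REQUIRES directive described in the changelog above; the environment variable and module name here are illustrative assumptions, not taken from this patch. The same multi-requirement form is exercised by the new test_multi_requires_directive test below, and notebooks can be checked the same way as modules, e.g. python -m xdoctest testing/notebook_with_doctests.ipynb.

    >>> # xdoctest: +REQUIRES(env:SLOW_TESTS, module:numpy)
    >>> # every listed requirement must be satisfied, otherwise the
    >>> # remaining statements in this block are skipped
    >>> import numpy
    >>> print(numpy.arange(3).sum())
    3
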
diff --git a/pytest.ini b/pytest.ini index 34cef2af..216737dd 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,9 +1,9 @@ [pytest] # ON COVERAGE OF PYTEST PLUGINS: # http://pytest-cov.readthedocs.io/en/latest/plugins.html -addopts = -p pytester -p no:doctest --xdoctest --ignore-glob=setup.py +addopts = -p pytester -p no:doctest --xdoctest --ignore-glob=setup.py --ignore=.tox norecursedirs = .git ignore build __pycache__ docs *.egg-info _* dev testing/pybind11_test setup.py ---pyargs --doctest-modules --ignore=.tox +# --pyargs --doctest-modules --ignore=.tox ;rsyncdirs = tox.ini pytest.py _pytest testing ;python_files = test_*.py *_test.py testing/*/*.py ;python_classes = Test Acceptance diff --git a/requirements/colors.txt b/requirements/colors.txt new file mode 100644 index 00000000..59f5a094 --- /dev/null +++ b/requirements/colors.txt @@ -0,0 +1,2 @@ +Pygments >= 2.2.0 +colorama >= 0.4.1;platform_system=="Windows" diff --git a/requirements/jupyter.txt b/requirements/jupyter.txt new file mode 100644 index 00000000..a8fee76c --- /dev/null +++ b/requirements/jupyter.txt @@ -0,0 +1,6 @@ +nbformat;python_version>'3.4' +nbconvert; python_version>'3.4' +jupyter_client; python_version>'3.4' +IPython; python_version>'3.4' +ipykernel; python_version>'3.4' + diff --git a/requirements/optional.txt b/requirements/optional.txt index 59f5a094..95b77471 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -1,2 +1,2 @@ -Pygments >= 2.2.0 -colorama >= 0.4.1;platform_system=="Windows" +-r colors.txt +-r jupyter.txt diff --git a/requirements/tests.txt b/requirements/tests.txt index a226ade4..7939fa91 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -9,9 +9,7 @@ ninja pybind11 # for testing doctests in jupyter notebooks -nbformat -nbconvert -jupyter_client +-r jupyter.txt #pip uninstall pytest-ipynb #pytest-ipynb >= 1.1.1 diff --git a/setup.py b/setup.py index 3c1ce925..93191bbe 100755 --- a/setup.py +++ b/setup.py @@ -25,70 +25,92 @@ def visit_Assign(self, node): return visitor.version -def parse_requirements(fname='requirements.txt'): +def parse_requirements(fname='requirements.txt', with_version=False): """ Parse the package dependencies listed in a requirements file but strips specific versioning information. 
- CommandLine: - python -c "import setup; print(setup.parse_requirements())" + Args: + fname (str): path to requirements file + with_version (bool, default=False): if true include version specs + + Returns: + List[str]: list of requirements items """ - from os.path import exists + from os.path import exists, dirname, join import re require_fpath = fname - def parse_line(line): + def parse_line(line, dpath=''): """ Parse information from a line in a requirements text file + + line = 'git+https://a.com/somedep@sometag#egg=SomeDep' + line = '-e git+https://a.com/somedep@sometag#egg=SomeDep' """ + # Remove inline comments + comment_pos = line.find(' #') + if comment_pos > -1: + line = line[:comment_pos] + if line.startswith('-r '): # Allow specifying requirements in other files - target = line.split(' ')[1] + target = join(dpath, line.split(' ')[1]) for info in parse_require_file(target): yield info - elif line.startswith('-e '): - info = {} - info['package'] = line.split('#egg=')[1] - yield info else: - # Remove versioning from the package - pat = '(' + '|'.join(['>=', '==', '>']) + ')' - parts = re.split(pat, line, maxsplit=1) - parts = [p.strip() for p in parts] - - info = {} - info['package'] = parts[0] - if len(parts) > 1: - op, rest = parts[1:] - if ';' in rest: + # See: https://www.python.org/dev/peps/pep-0508/ + info = {'line': line} + if line.startswith('-e '): + info['package'] = line.split('#egg=')[1] + else: + if ';' in line: + pkgpart, platpart = line.split(';') # Handle platform specific dependencies - # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies - version, platform_deps = map(str.strip, rest.split(';')) - info['platform_deps'] = platform_deps + # setuptools.readthedocs.io/en/latest/setuptools.html + # #declaring-platform-specific-dependencies + plat_deps = platpart.strip() + info['platform_deps'] = plat_deps else: + pkgpart = line + platpart = None + + # Remove versioning from the package + pat = '(' + '|'.join(['>=', '==', '>']) + ')' + parts = re.split(pat, pkgpart, maxsplit=1) + parts = [p.strip() for p in parts] + + info['package'] = parts[0] + if len(parts) > 1: + op, rest = parts[1:] version = rest # NOQA - info['version'] = (op, version) + info['version'] = (op, version) yield info def parse_require_file(fpath): + dpath = dirname(fpath) with open(fpath, 'r') as f: for line in f.readlines(): line = line.strip() if line and not line.startswith('#'): - for info in parse_line(line): + for info in parse_line(line, dpath=dpath): yield info - # This breaks on pip install, so check that it exists. 
- packages = [] - if exists(require_fpath): - for info in parse_require_file(require_fpath): - package = info['package'] - if not sys.version.startswith('3.4'): - # apparently package_deps are broken in 3.4 - platform_deps = info.get('platform_deps') - if platform_deps is not None: - package += ';' + platform_deps - packages.append(package) + def gen_packages_items(): + if exists(require_fpath): + for info in parse_require_file(require_fpath): + parts = [info['package']] + if with_version and 'version' in info: + parts.extend(info['version']) + if not sys.version.startswith('3.4'): + # apparently package_deps are broken in 3.4 + plat_deps = info.get('platform_deps') + if plat_deps is not None: + parts.append(';' + plat_deps) + item = ''.join(parts) + yield item + + packages = list(gen_packages_items()) return packages @@ -182,6 +204,7 @@ def native_mb_python_tag(plat_impl=None, version_info=None): ) +print(parse_requirements('requirements/tests.txt')) if __name__ == '__main__': setupkw.update(dict( description='A rewrite of the builtin doctest module', @@ -190,6 +213,8 @@ def native_mb_python_tag(plat_impl=None, version_info=None): 'all': parse_requirements('requirements.txt'), 'tests': parse_requirements('requirements/tests.txt'), 'optional': parse_requirements('requirements/optional.txt'), + 'colors': parse_requirements('requirements/colors.txt'), + 'jupyter': parse_requirements('requirements/jupyter.txt'), }, long_description=parse_description(), long_description_content_type='text/x-rst', diff --git a/testing/notebook_with_doctests.ipynb b/testing/notebook_with_doctests.ipynb index e92452d1..61d5175c 100644 --- a/testing/notebook_with_doctests.ipynb +++ b/testing/notebook_with_doctests.ipynb @@ -42,8 +42,11 @@ "metadata": {}, "outputs": [], "source": [ - "import xdoctest\n", - "xdoctest.doctest_callable(inception)" + "if __name__ == '__main__':\n", + " import xdoctest\n", + " xdoctest.doctest_callable(inception)\n", + " xdoctest.doctest_callable(inception2)\n", + " xdoctest.doctest_callable(random_number)" ] }, { @@ -52,18 +55,9 @@ "metadata": {}, "outputs": [], "source": [ - "import xdoctest\n", - "xdoctest.doctest_callable(inception2)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import xdoctest\n", - "xdoctest.doctest_callable(random_number)" + "if __name__ == '__main__':\n", + " import xdoctest\n", + " xdoctest.doctest_module(command='all')" ] } ], diff --git a/testing/test_directive.py b/testing/test_directive.py index d5f720d3..cdc851f4 100644 --- a/testing/test_directive.py +++ b/testing/test_directive.py @@ -35,6 +35,43 @@ def test_block_skip_directive(): assert result['passed'] +def test_multi_requires_directive(): + """ + Test semi-complex case with multiple requirements in a single line + + xdoctest ~/code/xdoctest/testing/test_directive.py test_multi_requires_directive + """ + string = utils.codeblock( + ''' + >>> x = 0 + >>> print('not-skipped') + >>> # doctest: +REQUIRES(env:NOT_EXIST, --show, module:xdoctest) + >>> print('is-skipped') + >>> assert False, 'should be skipped' + >>> # doctest: -REQUIRES(env:NOT_EXIST, module:xdoctest) + >>> print('is-skipped') + >>> assert False, 'should be skipped' + >>> # doctest: +REQUIRES(env:NOT_EXIST, --show, module:xdoctest) + >>> print('is-skipped') + >>> assert False, 'should be skipped' + >>> # doctest: -REQUIRES(env:NOT_EXIST) + >>> print('is-skipped') + >>> assert False, 'should be skipped' + >>> # doctest: -REQUIRES(--show) + >>> print('not-skipped') + >>> x 
= 'this will not be skipped' + >>> # doctest: -REQUIRES(env:NOT_EXIST, --show, module:xdoctest) + >>> print('not-skipped') + >>> assert x == 'this will not be skipped' + ''') + self = doctest_example.DocTest(docsrc=string) + result = self.run(on_error='raise') + stdout = ''.join(list(self.logged_stdout.values())) + assert result['passed'] + assert stdout.count('not-skipped') == 3 + assert stdout.count('is-skipped') == 0 + + if __name__ == '__main__': """ CommandLine: diff --git a/testing/test_doctest_in_notebook.ipynb b/testing/test_doctest_in_notebook.ipynb new file mode 100644 index 00000000..e1b90768 --- /dev/null +++ b/testing/test_doctest_in_notebook.ipynb @@ -0,0 +1,137 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def inception(text):\n", + " '''\n", + " Example:\n", + " >>> inception(\"I heard you liked doctests\")\n", + " '''\n", + " print(text + str(x)) \n", + " \n", + "x = 10\n", + "\n", + "def foo():\n", + " return \"bar\"\n", + "\n", + "def inception2(text):\n", + " '''\n", + " Example:\n", + " >>> inception2(\"I heard you liked doctests\" + foo())\n", + " '''\n", + " print(text + str(x)) \n", + " \n", + "def random_number():\n", + " \"\"\"Returns a random integer from 1 to 6.\n", + " \n", + " >>> type(random_number())\n", + " \n", + " >>> random_number() in range(1,7)\n", + " True\n", + " \"\"\"\n", + " return 5 # Chosen by a fair dice roll" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "====== ======\n", + "* DOCTEST : ::inception:0, line 3 <- wrt source file\n", + "DOCTEST SOURCE\n", + "1 >>> inception(\"I heard you liked doctests\")\n", + "DOCTEST STDOUT/STDERR\n", + "I heard you liked doctests10\n", + "DOCTEST RESULT\n", + "* SUCCESS: ::inception:0\n", + "====== ======\n" + ] + } + ], + "source": [ + "import xdoctest\n", + "xdoctest.doctest_callable(inception)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "====== ======\n", + "* DOCTEST : ::inception2:0, line 3 <- wrt source file\n", + "DOCTEST SOURCE\n", + "1 >>> inception2(\"I heard you liked doctests\" + foo())\n", + "DOCTEST STDOUT/STDERR\n", + "I heard you liked doctestsbar10\n", + "DOCTEST RESULT\n", + "* SUCCESS: ::inception2:0\n", + "====== ======\n" + ] + } + ], + "source": [ + "import xdoctest\n", + "xdoctest.doctest_callable(inception2)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import xdoctest\n", + "xdoctest.doctest_callable(random_number)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/testing/test_notebook.py b/testing/test_notebook.py index 126cd1d6..985f220d 100644 --- a/testing/test_notebook.py +++ b/testing/test_notebook.py @@ -1,18 
+1,43 @@ -def test_notebook(): - """ - xdoctest ~/code/xdoctest/testing/test_notebook.py test_notebook - """ - # How to run Jupyter from Python - # https://nbconvert.readthedocs.io/en/latest/execute_api.html - import six - import pytest - if six.PY2: - pytest.skip('cannot test this case in Python2') +import pytest +import sys +from os.path import join, exists, dirname +from distutils.version import LooseVersion + +PY_VERSION = LooseVersion('{}.{}'.format(*sys.version_info[0:2])) +IS_MODERN_PYTHON = PY_VERSION > LooseVersion('3.4') + + +def skip_notebook_tests_if_unsupported(): + if not IS_MODERN_PYTHON: + pytest.skip('jupyter support is only for modern python versions') + + try: + import IPython # NOQA + import nbconvert # NOQA + import nbformat # NOQA + except Exception: + pytest.skip('Missing jupyter') + + +def cmd(command): + # simplified version of ub.cmd no fancy tee behavior + import subprocess + proc = subprocess.Popen( + command, shell=True, universal_newlines=True, + stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + out, err = proc.communicate() + ret = proc.wait() + info = { + 'proc': proc, + 'out': out, + 'err': err, + 'ret': ret, + } + return info - import nbformat - from nbconvert.preprocessors import ExecutePreprocessor - from os.path import dirname, join, exists +def demodata_notebook_fpath(): try: testdir = dirname(__file__) except NameError: @@ -20,9 +45,38 @@ def test_notebook(): import os testdir = os.path.expandvars('$HOME/code/xdoctest/testing/') assert exists(testdir), 'assuming a specific dev environment' - notebook_fpath = join(testdir, "notebook_with_doctests.ipynb") - ep = ExecutePreprocessor(timeout=600, kernel_name='python3') - with open(notebook_fpath) as file: - nb = nbformat.read(file, as_version=nbformat.NO_CONVERT) - ep.preprocess(nb, {'metadata': {'path': testdir}}) + return notebook_fpath + + +def test_xdoctest_inside_notebook(): + """ + xdoctest ~/code/xdoctest/testing/test_notebook.py test_xdoctest_inside_notebook + + xdoctest notebook_with_doctests.ipynb + """ + # How to run Jupyter from Python + # https://nbconvert.readthedocs.io/en/latest/execute_api.html + skip_notebook_tests_if_unsupported() + + notebook_fpath = demodata_notebook_fpath() + + from xdoctest.utils import util_notebook + nb, resources = util_notebook.execute_notebook(notebook_fpath, verbose=3) + + last_cell = nb['cells'][-1] + text = last_cell['outputs'][0]['text'] + assert '3 / 3 passed' in text + + +def test_xdoctest_outside_notebook(): + + skip_notebook_tests_if_unsupported() + + if sys.platform.startswith('win32'): + pytest.skip() + + notebook_fpath = demodata_notebook_fpath() + info = cmd(sys.executable + ' -m xdoctest ' + notebook_fpath) + text = info['out'] + assert '3 / 3 passed' in text diff --git a/xdoctest/__init__.py b/xdoctest/__init__.py index f67b0669..1728f35f 100644 --- a/xdoctest/__init__.py +++ b/xdoctest/__init__.py @@ -236,7 +236,7 @@ def fib(n): ''' # mkinit xdoctest --nomods -__version__ = '0.14.1' +__version__ = '0.15.0' # TODO: diff --git a/xdoctest/__main__.py b/xdoctest/__main__.py index d413e459..df3c1152 100644 --- a/xdoctest/__main__.py +++ b/xdoctest/__main__.py @@ -72,7 +72,7 @@ class RawDescriptionDefaultsHelpFormatter( from xdoctest import doctest_example from xdoctest import runner runner._update_argparse_cli(parser.add_argument) - doctest_example.Config()._update_argparse_cli(parser.add_argument) + doctest_example.DoctestConfig()._update_argparse_cli(parser.add_argument) args, unknown = parser.parse_known_args(args=argv[1:]) ns = 
args.__dict__.copy() @@ -134,7 +134,7 @@ class RawDescriptionDefaultsHelpFormatter( ns['options'] = options from xdoctest import doctest_example - config = doctest_example.Config()._populate_from_cli(ns) + config = doctest_example.DoctestConfig()._populate_from_cli(ns) import textwrap if config['verbose'] > 2: diff --git a/xdoctest/core.py b/xdoctest/core.py index 255df1d7..57103a5e 100644 --- a/xdoctest/core.py +++ b/xdoctest/core.py @@ -2,6 +2,23 @@ """ Core methods used by xdoctest runner and plugin code to statically extract doctests from a module or package. + + +The following is a list of terms and jargon used in this repo. +TODO: ensure the list is complete, + +Glossary: + + * callname - the name of a callable function, method, class etc.. + e.g. ``myfunc``, ``MyClass``, or ``MyClass.some_method``. + + * got / want - a test that produces stdout or a value to check. Whatever is + produced is what you "got" and whatever is expected is what you "want". + + * directives - special in-doctest comments that change the behavior + of the doctests at runtime. + + """ from __future__ import absolute_import, division, print_function, unicode_literals import sys @@ -9,6 +26,7 @@ import warnings import six import itertools as it +import types from os.path import exists from fnmatch import fnmatch from xdoctest import dynamic_analysis @@ -18,6 +36,7 @@ from xdoctest import doctest_example from xdoctest import utils from xdoctest.docstr import docscrape_google +from xdoctest.utils import util_import DEBUG = '--debug' in sys.argv @@ -55,6 +74,7 @@ def parse_freeform_docstr_examples(docstr, callname=None, modpath=None, python -m xdoctest.core parse_freeform_docstr_examples Example: + >>> # TODO: move this to unit tests and make the doctest simpler >>> from xdoctest import core >>> from xdoctest import utils >>> docstr = utils.codeblock( @@ -63,7 +83,7 @@ def parse_freeform_docstr_examples(docstr, callname=None, modpath=None, >>> >>> doctest >>> >>> hasmultilines >>> whoppie - >>> >>> 'butthis is the same doctest' + >>> >>> 'but this is the same doctest' >>> >>> >>> secondone >>> @@ -264,9 +284,10 @@ def parse_docstr_examples(docstr, callname=None, modpath=None, lineno=1, modpath (PathLike): original module the docstring is from - lineno (int): the line number (starting from 1) of the docstring. - i.e. if you were to go to this line number in the source file - the starting quotes of the docstr would be on this line. + lineno (int, default=1): + the line number (starting from 1) of the docstring. i.e. if you + were to go to this line number in the source file the starting + quotes of the docstr would be on this line. style (str): expected doctest style (e.g. 
google, freeform, auto) @@ -356,7 +377,9 @@ def parse_docstr_examples(docstr, callname=None, modpath=None, lineno=1, def _rectify_to_modpath(modpath_or_name): """ if modpath_or_name is a name, statically converts it to a path """ - modpath = static_analysis.modname_to_modpath(modpath_or_name) + if isinstance(modpath_or_name, types.ModuleType): + raise TypeError('Expected a static module but got a dynamic one') + modpath = util_import.modname_to_modpath(modpath_or_name) if modpath is None: if six.PY2: if modpath_or_name.endswith('.pyc'): @@ -368,101 +391,151 @@ def _rectify_to_modpath(modpath_or_name): return modpath -def package_calldefs(modpath_or_name, exclude=[], ignore_syntax_errors=True, - analysis='static'): +def package_calldefs(pkg_identifier, exclude=[], ignore_syntax_errors=True, + analysis='auto'): """ Statically generates all callable definitions in a module or package Args: - modpath_or_name (str): path to or name of the module to be tested + pkg_identifier (str | Module): path to or name of the module to be + tested (or the live module itself, which is not recommended) exclude (List[str]): glob-patterns of file names to exclude ignore_syntax_errors (bool, default=True): if False raise an error when syntax errors occur in a doctest - analysis (str, default='static'): + analysis (str, default='auto'): if 'static', only static analysis is used to parse call definitions. If 'auto', uses dynamic analysis for compiled python extensions, but static analysis elsewhere, if 'dynamic', then dynamic analysis is used to parse all calldefs. + Yields: + Tuple[Dict[str, CallDefNode], str | Module] - + item[0]: the mapping of callnames-to-calldefs + item[1]: the path to the file containing the doctest + (usually a module) or the module itself + Example: - >>> modpath_or_name = 'xdoctest.core' - >>> testables = list(package_calldefs(modpath_or_name)) + >>> pkg_identifier = 'xdoctest.core' + >>> testables = list(package_calldefs(pkg_identifier)) >>> assert len(testables) == 1 >>> calldefs, modpath = testables[0] - >>> assert static_analysis.modpath_to_modname(modpath) == modpath_or_name + >>> assert util_import.modpath_to_modname(modpath) == pkg_identifier >>> assert 'package_calldefs' in calldefs """ - pkgpath = _rectify_to_modpath(modpath_or_name) - - modpaths = static_analysis.package_modpaths(pkgpath, with_pkg=True, - with_libs=True) - modpaths = list(modpaths) - for modpath in modpaths: - modname = static_analysis.modpath_to_modname(modpath) - if any(fnmatch(modname, pat) for pat in exclude): - continue - if not exists(modpath): - warnings.warn( - 'Module {} does not exist. 
' - 'Is it an old pyc file?'.format(modname)) - continue - - # backwards compatibility hacks - if '--allow-xdoc-dynamic' in sys.argv: - analysis = 'auto' - if '--xdoc-force-dynamic' in sys.argv: - analysis = 'dynamic' - - needs_dynamic = modpath.endswith( - static_analysis._platform_pylib_exts()) - - if analysis == 'static': - do_dynamic = False - elif analysis == 'dynamic': - do_dynamic = True - elif analysis == 'auto': - do_dynamic = needs_dynamic - else: - raise KeyError(analysis) - - if do_dynamic: - try: - calldefs = dynamic_analysis.parse_dynamic_calldefs(modpath) - except (ImportError, RuntimeError) as ex: - # Some modules are just c modules - msg = 'Cannot dynamically parse module={} at path={}.\nCaused by: {!r} {}' - msg = msg.format(modname, modpath, type(ex), ex) - warnings.warn(msg) - except Exception as ex: - msg = 'Cannot dynamically parse module={} at path={}.\nCaused by: {!r} {}' - msg = msg.format(modname, modpath, type(ex), ex) - warnings.warn(msg) - raise - else: - yield calldefs, modpath - else: - if needs_dynamic: - # Some modules can only be parsed dynamically + if isinstance(pkg_identifier, types.ModuleType): + # Case where we are forced to use a live module + identifiers = [pkg_identifier] + else: + pkgpath = _rectify_to_modpath(pkg_identifier) + identifiers = list(static_analysis.package_modpaths( + pkgpath, with_pkg=True, with_libs=True)) + + for module_identifier in identifiers: + if isinstance(module_identifier, six.string_types): + modpath = module_identifier + modname = util_import.modpath_to_modname(modpath) + if any(fnmatch(modname, pat) for pat in exclude): continue - try: - calldefs = static_analysis.parse_calldefs(fpath=modpath) - except SyntaxError as ex: - # Handle error due to the actual code containing errors - msg = 'Cannot parse module={} at path={}.\nCaused by: {}' - msg = msg.format(modname, modpath, ex) - if ignore_syntax_errors: - warnings.warn(msg) # real code or docstr contained errors - continue - else: - raise SyntaxError(msg) + if not exists(modpath): + warnings.warn( + 'Module {} does not exist. ' + 'Is it an old pyc file?'.format(modname)) + continue + try: + calldefs = parse_calldefs(module_identifier) + if calldefs is not None: + yield calldefs, module_identifier + except SyntaxError as ex: + # Handle error due to the actual code containing errors + msg = 'Cannot parse module={}.\nCaused by: {}' + msg = msg.format(module_identifier, ex) + if ignore_syntax_errors: + warnings.warn(msg) # real code or docstr contained errors else: - yield calldefs, modpath + raise SyntaxError(msg) + + +def parse_calldefs(module_identifier, analysis='auto'): + """ + Parse calldefs from a single module using either static or dynamic + analysis. + + Args: + module_identifier (str | Module): path to or name of the module to be + tested (or the live module itself, which is not recommended) + + analysis (str, default='auto'): + if 'static', only static analysis is used to parse call + definitions. If 'auto', uses dynamic analysis for compiled python + extensions, but static analysis elsewhere, if 'dynamic', then + dynamic analysis is used to parse all calldefs. + + Returns: + Dict[str, CallDefNode]: the mapping of callnames-to-calldefs within + the module. 
+ """ + # backwards compatibility hacks + if '--allow-xdoc-dynamic' in sys.argv: + warnings.warn( + '--allow-xdoc-dynamic is deprecated and will be removed in ' + 'the future use --analysis=auto instead', DeprecationWarning) + analysis = 'auto' + if '--xdoc-force-dynamic' in sys.argv: + warnings.warn( + '--xdoc-force-dynamic is deprecated and will be removed in ' + 'the future use --analysis=dynamic instead', DeprecationWarning) + analysis = 'dynamic' + + if isinstance(module_identifier, types.ModuleType): + # identifier is a live module + need_dynamic = True + else: + # identifier is a path to a module + modpath = module_identifier + # Certain files (notebooks and c-extensions) require dynamic analysis + need_dynamic = modpath.endswith( + static_analysis._platform_pylib_exts()) + if modpath.endswith('.ipynb'): + need_dynamic = True + + if analysis == 'static': + if need_dynamic: + # Some modules can only be parsed dynamically + raise Exception(( + 'Static analysis required, but {} requires ' + 'dynamic analysis').format(module_identifier)) + do_dynamic = False + elif analysis == 'dynamic': + do_dynamic = True + elif analysis == 'auto': + do_dynamic = need_dynamic + else: + raise KeyError(analysis) + + calldefs = None + if do_dynamic: + try: + calldefs = dynamic_analysis.parse_dynamic_calldefs(module_identifier) + except (ImportError, RuntimeError) as ex: + # Some modules are just c modules + msg = 'Cannot dynamically parse module={}.\nCaused by: {!r} {}' + msg = msg.format(module_identifier, type(ex), ex) + warnings.warn(msg) + except Exception as ex: + msg = 'Cannot dynamically parse module={}.\nCaused by: {!r} {}' + msg = msg.format(module_identifier, type(ex), ex) + warnings.warn(msg) + raise + else: + calldefs = static_analysis.parse_static_calldefs(fpath=module_identifier) + + return calldefs -def parse_doctestables(modpath_or_name, exclude=[], style='auto', +def parse_doctestables(module_identifier, exclude=[], style='auto', ignore_syntax_errors=True, parser_kw={}, analysis='static'): """ @@ -470,7 +543,8 @@ def parse_doctestables(modpath_or_name, exclude=[], style='auto', example objects. The style influences which tests are found. Args: - modpath_or_name (str | PathLike): path or name of a module + module_identifier (str | PathLike | Module): + path or name of a module or a module itself (we prefer a path) exclude (List[str]): glob-patterns of file names to exclude @@ -494,8 +568,8 @@ def parse_doctestables(modpath_or_name, exclude=[], style='auto', python -m xdoctest.core parse_doctestables Example: - >>> modpath_or_name = 'xdoctest.core' - >>> testables = list(parse_doctestables(modpath_or_name)) + >>> module_identifier = 'xdoctest.core' + >>> testables = list(parse_doctestables(module_identifier)) >>> this_example = None >>> for example in testables: >>> # print(example) @@ -525,7 +599,7 @@ def parse_doctestables(modpath_or_name, exclude=[], style='auto', style, DOCTEST_STYLES)) # Statically parse modules and their doctestable callables in a package - for calldefs, modpath in package_calldefs(modpath_or_name, exclude, + for calldefs, modpath in package_calldefs(module_identifier, exclude, ignore_syntax_errors, analysis=analysis): for callname, calldef in calldefs.items(): diff --git a/xdoctest/directive.py b/xdoctest/directive.py index 479c8bcd..bb809262 100644 --- a/xdoctest/directive.py +++ b/xdoctest/directive.py @@ -238,38 +238,40 @@ def update(self, directives): Update the runtime state given a set of directives Args: - directives (List[Directive]): list of directives. 
The `effect` + directives (List[Directive]): list of directives. The `effects` method is used to update this object. """ # Clear the previous inline state self._inline_state.clear() for directive in directives: + for effect in directive.effects(): + action, key, value = effect + if action == 'noop': + continue - action, key, value = directive.effect() + if key not in self._global_state: + warnings.warn('Unknown state: {}'.format(key)) - if action == 'noop': - continue - - if key not in self._global_state: - warnings.warn('Unknown state: {}'.format(key)) - - # Determine if this impacts the local (inline) or global state. - if directive.inline: - state = self._inline_state - else: - state = self._global_state - - if action == 'set_report_style': - # Special handling of report style - self.set_report_style(key.replace('REPORT_', '')) - elif action == 'assign': - state[key] = value - elif action == 'set.add': - state[key].add(value) - elif action == 'set.remove': - state[key].remove(value) - else: - raise KeyError('unknown action {}'.format(action)) + # Determine if this impacts the local (inline) or global state. + if directive.inline: + state = self._inline_state + else: + state = self._global_state + + if action == 'set_report_style': + # Special handling of report style + self.set_report_style(key.replace('REPORT_', '')) + elif action == 'assign': + state[key] = value + elif action == 'set.add': + state[key].add(value) + elif action == 'set.remove': + try: + state[key].remove(value) + except KeyError: + pass + else: + raise KeyError('unknown action {}'.format(action)) class Directive(utils.NiceRepr): @@ -323,6 +325,11 @@ def extract(cls, text): >>> print(', '.join(list(map(str, Directive.extract(text))))) , + >>> # Make sure commas inside parens are not split + >>> text = '# xdoctest: +REQUIRES(module:foo,module:bar)' + >>> print(', '.join(list(map(str, Directive.extract(text))))) + + Example: >>> any(Directive.extract(' # xdoctest: skip')) True @@ -344,7 +351,9 @@ def extract(cls, text): if m: for key, optstr in m.groupdict().items(): if optstr: - for optpart in optstr.split(','): + optparts = _split_opstr(optstr) + # optparts = optstr.split(',') + for optpart in optparts: directive = parse_directive_optstr(optpart, inline) if directive: yield directive @@ -358,6 +367,7 @@ def __nice__(self): return '{}{}'.format(prefix, self.name) def _unpack_args(self, num): + warnings.warning('Deprecated and will be removed', DeprecationWarning) nargs = self.args if len(nargs) != 1: raise TypeError( @@ -366,10 +376,17 @@ def _unpack_args(self, num): return self.args def effect(self, argv=None, environ=None): + warnings.warning('Deprecated use effects', DeprecationWarning) + effects = self.effects(argv=argv, environ=environ) + if len(effects) > 1: + raise Exception('Old method cannot hanldle multiple effects') + return effects[0] + + def effects(self, argv=None, environ=None): """ Returns how this directive modifies a RuntimeState object - This is used by a RuntimeState object to update itself + This is called by :func:`RuntimeState.update` to update itself Args: argv (List[str], default=None): @@ -378,80 +395,126 @@ def effect(self, argv=None, environ=None): if specified, overwrite os.environ Returns: - Effect: named tuple containing: + List[Effect]: list of named tuples containing: action (str): code indicating how to update key (str): name of runtime state item to modify value (object): value to modify with CommandLine: - xdoctest -m xdoctest.directive Directive.effect + xdoctest -m 
xdoctest.directive Directive.effects Example: - >>> Directive('SKIP').effect() + >>> Directive('SKIP').effects()[0] Effect(action='assign', key='SKIP', value=True) - >>> Directive('SKIP', inline=True).effect() + >>> Directive('SKIP', inline=True).effects()[0] Effect(action='assign', key='SKIP', value=True) - >>> Directive('REQUIRES', args=['-s']).effect(argv=['-s']) - Effect(action='noop', key='REQUIRES', value=None) - >>> Directive('REQUIRES', args=['-s']).effect(argv=[]) + >>> Directive('REQUIRES', args=['-s']).effects(argv=['-s'])[0] + Effect(action='noop', key='REQUIRES', value='-s') + >>> Directive('REQUIRES', args=['-s']).effects(argv=[])[0] Effect(action='set.add', key='REQUIRES', value='-s') - >>> Directive('ELLIPSIS', args=['-s']).effect(argv=[]) + >>> Directive('ELLIPSIS', args=['-s']).effects(argv=[])[0] Effect(action='assign', key='ELLIPSIS', value=True) Doctest: >>> # requirement directive with module >>> directive = list(Directive.extract('# xdoctest: requires(module:xdoctest)'))[0] >>> print('directive = {}'.format(directive)) - >>> print('directive.effect() = {}'.format(directive.effect())) + >>> print('directive.effects() = {}'.format(directive.effects()[0])) directive = - directive.effect() = Effect(action='noop', key='REQUIRES', value=None) + directive.effects() = Effect(action='noop', key='REQUIRES', value='module:xdoctest') >>> directive = list(Directive.extract('# xdoctest: requires(module:notamodule)'))[0] >>> print('directive = {}'.format(directive)) - >>> print('directive.effect() = {}'.format(directive.effect())) + >>> print('directive.effects() = {}'.format(directive.effects()[0])) directive = - directive.effect() = Effect(action='set.add', key='REQUIRES', value='module:notamodule') + directive.effects() = Effect(action='set.add', key='REQUIRES', value='module:notamodule') >>> directive = list(Directive.extract('# xdoctest: requires(env:FOO==1)'))[0] >>> print('directive = {}'.format(directive)) - >>> print('directive.effect() = {}'.format(directive.effect(environ={}))) + >>> print('directive.effects() = {}'.format(directive.effects(environ={})[0])) directive = - directive.effect() = Effect(action='set.add', key='REQUIRES', value='env:FOO==1') + directive.effects() = Effect(action='set.add', key='REQUIRES', value='env:FOO==1') >>> directive = list(Directive.extract('# xdoctest: requires(env:FOO==1)'))[0] >>> print('directive = {}'.format(directive)) - >>> print('directive.effect() = {}'.format(directive.effect(environ={'FOO': '1'}))) + >>> print('directive.effects() = {}'.format(directive.effects(environ={'FOO': '1'})[0])) directive = - directive.effect() = Effect(action='noop', key='REQUIRES', value=None) + directive.effects() = Effect(action='noop', key='REQUIRES', value='env:FOO==1') + + >>> # requirement directive with two args + >>> directive = list(Directive.extract('# xdoctest: requires(--show, module:xdoctest)'))[0] + >>> print('directive = {}'.format(directive)) + >>> for effect in directive.effects(): + >>> print('effect = {!r}'.format(effect)) + directive = + effect = Effect(action='set.add', key='REQUIRES', value='--show') + effect = Effect(action='noop', key='REQUIRES', value='module:xdoctest') """ key = self.name value = None + effects = [] if self.name == 'REQUIRES': # Special handling of REQUIRES - arg, = self._unpack_args(1) - if _is_requires_satisfied(arg, argv=argv, environ=environ): - # If the requirement is met, then do nothing, - action = 'noop' - else: - # otherwise, add or remove the condtion from REQUIREMENTS, - # depending on if the 
directive is positive or negative. + for arg in self.args: value = arg + if _is_requires_satisfied(arg, argv=argv, environ=environ): + # If the requirement is met, then do nothing, + action = 'noop' else: - action = 'set.add' + # otherwise, add or remove the condition from REQUIREMENTS, + # depending on if the directive is positive or negative. + if self.positive: + action = 'set.add' + else: + action = 'set.remove' + effects.append(Effect(action, key, value)) elif key.startswith('REPORT_'): # Special handling of report style if self.positive: action = 'noop' else: action = 'set_report_style' + effects.append(Effect(action, key, value)) else: # The action overwrites state[key] using value action = 'assign' value = self.positive - return Effect(action, key, value) + effects.append(Effect(action, key, value)) + return effects + + +def _split_opstr(optstr): + """ + Simplified balanced paren logic to only split commas outside of parens + + Example: + >>> optstr = '+FOO, REQUIRES(foo,bar), +ELLIPSIS' + >>> _split_opstr(optstr) + ['+FOO', 'REQUIRES(foo,bar)', '+ELLIPSIS'] + """ + import re + stack = [] + split_pos = [] + for match in re.finditer(r',|\(|\)', optstr): + token = match.group() + if token == ',' and not stack: + # Only split when there are no parens + split_pos.append(match.start()) + elif token == '(': + stack.append(token) + elif token == ')': + stack.pop() + assert len(stack) == 0, 'parens not balanced' + + parts = [] + prev = 0 + for curr in split_pos: + parts.append(optstr[prev:curr].strip()) + prev = curr + 1 + curr = None + parts.append(optstr[prev:curr].strip()) + return parts def _is_requires_satisfied(arg, argv=None, environ=None): @@ -622,8 +685,9 @@ def parse_directive_optstr(optpart, inline=None): paren_pos = optpart.find('(') if paren_pos > -1: # handle simple paren case. - # TODO expand or remove - args = [optpart[paren_pos + 1:optpart.find(')')]] + body = optpart[paren_pos + 1:optpart.find(')')] + args = [a.strip() for a in body.split(',')] + # args = [optpart[paren_pos + 1:optpart.find(')')]] optpart = optpart[:paren_pos] else: args = [] diff --git a/xdoctest/doctest_example.py b/xdoctest/doctest_example.py index e25328f3..487ff0e2 100644 --- a/xdoctest/doctest_example.py +++ b/xdoctest/doctest_example.py @@ -1,6 +1,9 @@ # -*- coding: utf-8 -*- """ This module defines the main class that holds a DocTest example + +TODO: + - [ ] Rename DocTest to Doctest? """ from __future__ import absolute_import, division, print_function, unicode_literals import __future__ @@ -19,24 +22,23 @@ from xdoctest import checker from xdoctest import exceptions -from distutils.version import LooseVersion - - -EVAL_MIGHT_RETURN_COROUTINE = LooseVersion(sys.version.split(' ')[0]) >= LooseVersion('3.9.0') +# I believe the original reason for this hack was fixed in 3.9rc (The CI will +# tell us otherwise if this is incorrect) +# from distutils.version import LooseVersion +# EVAL_MIGHT_RETURN_COROUTINE = LooseVersion(sys.version.split(' ')[0]) >= LooseVersion('3.9.0') +EVAL_MIGHT_RETURN_COROUTINE = False -class Config(dict): +class DoctestConfig(dict): """ Doctest configuration - TODO: - -[ ] rename to DoctestConfig? - Static configuration for collection, execution, and reporting doctests. - Note dynamic directives are not managed by Config, they use RuntimeState. + Note dynamic directives are not managed by DoctestConfig, they use + RuntimeState. 
""" def __init__(self, *args, **kwargs): - super(Config, self).__init__(*args, **kwargs) + super(DoctestConfig, self).__init__(*args, **kwargs) self.update({ # main options exposed by command line runner/plugin 'colored': hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(), @@ -136,27 +138,35 @@ class DocTest(object): Attributes: - docsrc (str): doctest source code + docsrc (str): + doctest source code - modpath (PathLike, optional): module the source was read from + modpath (str | PathLike, default=None): + module the source was read from - callname (PathLike, optional): name of the function/method/module being tested + callname (str, default=None): + name of the function/method/class/module being tested - num (int): the index of the doctest in the docstring. (i.e. - this object refers to the num-th doctest within a docstring) + num (int, default=0): + the index of the doctest in the docstring. (i.e. this object + refers to the num-th doctest within a docstring) - lineno (int): the line (starting from 1) in the file that the doctest - begins on. (i.e. if you were to go to this line in the file, the - first line of the doctest should be on this line). + lineno (int, default=1): + The line (starting from 1) in the file that the doctest begins on. + (i.e. if you were to go to this line in the file, the first line of + the doctest should be on this line). - fpath (PathLike): typically the same as modpath, only specified for - non-python files + fpath (PathLike): + Typically the same as modpath, only specified for non-python files + (e.g. rst files). - block_type (str): code indicating the type of block. Can be - ('Example', 'Doctest', 'Script', 'Benchmark', 'zero-arg', None). + block_type (str, default=None): + Hint indicating the type of docstring block. Can be ('Example', + 'Doctest', 'Script', 'Benchmark', 'zero-arg', etc..). - mode (str, default='pytest'): hint at what created / is running this - doctest. + mode (str, default='pytest'): + Hint at what created / is running this doctest. This impacts + how results are presented and what doctests are skipped. 
CommandLine: xdoctest -m xdoctest.doctest_example DocTest @@ -178,27 +188,41 @@ class DocTest(object): """ + # Constant values for unknown attributes + UNKNOWN_MODNAME = '' + UNKNOWN_MODPATH = '' + UNKNOWN_CALLNAME = '' + UNKNOWN_FPATH = '' + def __init__(self, docsrc, modpath=None, callname=None, num=0, lineno=1, fpath=None, block_type=None, mode='pytest'): - + import types # if we know the google block type it is recorded self.block_type = block_type - self.config = Config() + self.config = DoctestConfig() + self.module = None self.modpath = modpath + self.fpath = fpath if modpath is None: - self.modname = '' - self.modpath = '' + self.modname = self.UNKNOWN_MODNAME + self.modpath = self.UNKNOWN_MODPATH + elif isinstance(modpath, types.ModuleType): + self.fpath = modpath + self.module = modpath + self.modname = modpath.__name__ + self.modpath = getattr(self.module, '__file__', self.UNKNOWN_MODPATH) else: if fpath is not None: - assert fpath == modpath, ( - 'only specify fpath for non-python files') + if fpath != modpath: + raise AssertionError( + 'only specify fpath for non-python files') self.fpath = modpath self.modname = static.modpath_to_modname(modpath) if callname is None: - self.callname = '' + self.callname = self.UNKNOWN_CALLNAME else: self.callname = callname self.docsrc = docsrc @@ -219,7 +243,6 @@ def __init__(self, docsrc, modpath=None, callname=None, num=0, self._runstate = None - self.module = None # Maintain global variables that this test will have access to self.global_namespace = {} # Hint at what is running this doctest @@ -454,6 +477,10 @@ def run(self, verbose=None, on_error=None): """ Executes the doctest, checks the results, reports the outcome. + Args: + verbose (int): verbosity level + on_error (str): can be 'raise' or 'return' + Returns: Dict : summary """ @@ -878,7 +905,7 @@ def repr_failure(self, with_tb=True): colored = self.config['colored'] if fail_lineno is not None: - fpath = '' if self.fpath is None else self.fpath + fpath = self.UNKNOWN_FPATH if self.fpath is None else self.fpath lines += [' File "{}", line {},'.format(fpath, fail_lineno) + self._color(' <- wrt source file', 'red')] diff --git a/xdoctest/dynamic_analysis.py b/xdoctest/dynamic_analysis.py index 37e97922..3fa991ed 100644 --- a/xdoctest/dynamic_analysis.py +++ b/xdoctest/dynamic_analysis.py @@ -9,12 +9,21 @@ import six -def parse_dynamic_calldefs(modpath=None): +def parse_dynamic_calldefs(modpath_or_module): """ Dynamic parsing of module doctestable items. - While this does execute module code it is needed for testing extension - libraries. + Unlike static parsing this forces execution of the module code before + test-time, however the former is limited to plain-text python files whereas + this can discover doctests in binary extension libraries. + + Args: + modpath_or_module (str | Module): path to module or the module itself + + Returns: + Dict[str, CallDefNode]: + maping from callnames to CallDefNodes, which contain + info about the item with the doctest. CommandLine: python -m xdoctest.dynamic_analysis parse_dynamic_calldefs @@ -32,9 +41,24 @@ def parse_dynamic_calldefs(modpath=None): ... 
print(' * len(calldef.docstr) = {}'.format(len(calldef.docstr))) """ from xdoctest import static_analysis as static - from xdoctest import utils # NOQA - # Possible option for dynamic parsing - module = utils.import_module_from_path(modpath) + + import types + if isinstance(modpath_or_module, types.ModuleType): + module = modpath_or_module + else: + modpath = modpath_or_module + if modpath.endswith('.ipynb'): + """ + modpath = ub.expandpath("~/code/xdoctest/testing/notebook_with_doctests.ipynb") + xdoctest ~/code/xdoctest/testing/notebook_with_doctests.ipynb + """ + from xdoctest.utils import util_notebook + module = util_notebook.import_notebook_from_path(modpath) + else: + # Possible option for dynamic parsing + from xdoctest.utils import util_import + module = util_import.import_module_from_path(modpath) + calldefs = {} if getattr(module, '__doc__'): diff --git a/xdoctest/plugin.py b/xdoctest/plugin.py index bc2392ea..cc57b25c 100644 --- a/xdoctest/plugin.py +++ b/xdoctest/plugin.py @@ -104,7 +104,7 @@ def str_lower(x): dest='xdoctest_analysis') from xdoctest import doctest_example - doctest_example.Config()._update_argparse_cli( + doctest_example.DoctestConfig()._update_argparse_cli( group.addoption, prefix=['xdoctest', 'xdoc'], defaults=dict(verbose=0) ) @@ -206,7 +206,7 @@ def __getattr__(self, attr): ns = NamespaceLike(self.config) from xdoctest import doctest_example - self._examp_conf = doctest_example.Config()._populate_from_cli(ns) + self._examp_conf = doctest_example.DoctestConfig()._populate_from_cli(ns) class XDoctestTextfile(_XDoctestBase): diff --git a/xdoctest/runner.py b/xdoctest/runner.py index c0348d37..25a22466 100644 --- a/xdoctest/runner.py +++ b/xdoctest/runner.py @@ -95,11 +95,10 @@ def doctest_callable(func): # HACK: to add module context, this might not be robust. doctest.module = sys.modules[func.__module__] doctest.global_namespace[func.__name__] = func - doctest.run(verbose=3) -def doctest_module(modpath_or_name=None, command=None, argv=None, exclude=[], +def doctest_module(module_identifier=None, command=None, argv=None, exclude=[], style='auto', verbose=None, config=None, durations=None, analysis='static'): """ @@ -107,8 +106,11 @@ def doctest_module(modpath_or_name=None, command=None, argv=None, exclude=[], Main entry point into the testing framework. Args: - modname (str | ModuleType): - name of or path to the module, or the module itself. + module_identifier (str | ModuleType | None): + The name of / path to the module, or the live module itself. + If not specified, dynamic analysis will be used to introspect the + module that called this function and that module will be used. + This can also contain the callname followed by the `::` token. command (str): determines which doctests to run. @@ -122,7 +124,7 @@ def doctest_module(modpath_or_name=None, command=None, argv=None, exclude=[], if specified, command line flags that might influence beharior. if None uses sys.argv. SeeAlso :func:_update_argparse_cli - SeeAlso :func:doctest_example.Config._update_argparse_cli + SeeAlso :func:doctest_example.DoctestConfig._update_argparse_cli verbose (int, default=None): Verbosity level. 
@@ -147,6 +149,24 @@ def doctest_module(modpath_or_name=None, command=None, argv=None, exclude=[], Example: >>> modname = 'xdoctest.dynamic_analysis' >>> result = doctest_module(modname, 'list', argv=['']) + + Example: + >>> # xdoctest: +SKIP + >>> # Demonstrate different ways "module_identifier" can be specified + >>> # + >>> # Using a module name + >>> result = doctest_module('xdoctest.static_analysis') + >>> # + >>> # Using a module path + >>> result = doctest_module(os.expandpath('~/code/xdoctest/xdoctest/static_analysis.py')) + >>> # + >>> # Using a module itself + >>> from xdoctest import runner + >>> result = doctest_module(runner) + >>> # + >>> # Using a module name and a specific callname + >>> from xdoctest import runner + >>> result = doctest_module('xdoctest.static_analysis::parse_static_value') """ _log = partial(log, verbose=DEBUG) _log('------+ DEBUG +------') @@ -154,36 +174,59 @@ def doctest_module(modpath_or_name=None, command=None, argv=None, exclude=[], _log('exclude = {!r}'.format(exclude)) _log('argv = {!r}'.format(argv)) _log('command = {!r}'.format(command)) - _log('modpath_or_name = {!r}'.format(modpath_or_name)) + _log('module_identifier = {!r}'.format(module_identifier)) _log('durations = {!r}'.format(durations)) _log('config = {!r}'.format(config)) _log('verbose = {!r}'.format(verbose)) _log('style = {!r}'.format(style)) _log('------+ /DEBUG +------') - # Determine package name via caller if not specified - if modpath_or_name is None: + modinfo = { + 'modpath': None, + 'modname': None, + 'module': None, + } + if module_identifier is None: + # Determine package name via caller if not specified frame_parent = dynamic_analysis.get_parent_frame() - modpath = frame_parent.f_globals['__file__'] + if '__file__' in frame_parent.f_globals: + modinfo['modpath'] = frame_parent.f_globals['__file__'] + else: + # Module might not exist as a path on disk, we might be trying to + # test an IPython session. + modinfo['modname'] = frame_parent.f_globals['__name__'] + modinfo['module'] = sys.modules[modinfo['modname']] else: - if command is None: + if isinstance(module_identifier, types.ModuleType): + modinfo['module'] = module_identifier + modinfo['modpath'] = modinfo['module'].__file__ + else: # Allow the modname to contain the name of the test to be run - if '::' in modpath_or_name: - modpath_or_name, command = modpath_or_name.split('::') - - if isinstance(modpath_or_name, types.ModuleType): - modpath_or_name = modpath_or_name.__file__ - - modpath = core._rectify_to_modpath(modpath_or_name) + if '::' in module_identifier: + if command is None: + modpath_or_name, command = module_identifier.split('::') + modinfo['modpath'] = core._rectify_to_modpath(modpath_or_name) + else: + raise ValueError('Command must be None if using :: syntax') + else: + modinfo['modpath'] = core._rectify_to_modpath(module_identifier) if config is None: - config = doctest_example.Config() + config = doctest_example.DoctestConfig() command, style, verbose = _parse_commandline(command, style, verbose, argv) _log = partial(log, verbose=verbose) - _log('Start doctest_module({!r})'.format(modpath_or_name)) + # Usually the "parseable_identifier" (i.e. the object we will extract the + # docstrings from) is a path to a module, but sometimes we will only be + # given the live module itself, hence the abstraction. 
+ if modinfo['modpath'] is None: + parsable_identifier = modinfo['module'] + else: + parsable_identifier = modinfo['modpath'] + + _log('Start doctest_module({!r})'.format(parsable_identifier)) _log('Listing tests') if command is None: @@ -201,8 +244,9 @@ def doctest_module(modpath_or_name=None, command=None, argv=None, exclude=[], # Parse all valid examples with warnings.catch_warnings(record=True) as parse_warnlist: - examples = list(core.parse_doctestables(modpath, exclude=exclude, - style=style, analysis=analysis)) + examples = list(core.parse_doctestables( + parsable_identifier, exclude=exclude, style=style, + analysis=analysis)) # Set each example mode to native to signal that we are using the # native xdoctest runner instead of the pytest runner for example in examples: @@ -226,7 +270,7 @@ def doctest_module(modpath_or_name=None, command=None, argv=None, exclude=[], if len(enabled_examples) == 0: # Check for zero-arg funcs - for example in _gather_zero_arg_examples(modpath): + for example in _gather_zero_arg_examples(parsable_identifier): if command in example.valid_testnames: enabled_examples.append(example) @@ -490,7 +534,7 @@ def _run_examples(enabled_examples, verbose, config=None, _log=None): def _parse_commandline(command=None, style='auto', verbose=None, argv=None): # Determine command via sys.argv if not specified - doctest_example.Config() + doctest_example.DoctestConfig() if argv is None: argv = sys.argv[1:] diff --git a/xdoctest/static_analysis.py b/xdoctest/static_analysis.py index ce562514..54abb116 100644 --- a/xdoctest/static_analysis.py +++ b/xdoctest/static_analysis.py @@ -31,6 +31,7 @@ class CallDefNode(object): """ Attributes: + callname (str): the name of the "calldef" doclineno (int): the line number (1 based) the docstring begins on doclineno_end (int): the line number (1 based) the docstring ends on """ @@ -44,10 +45,10 @@ def __init__(self, callname, lineno, docstr, doclineno, doclineno_end, self.lineno_end = None self.args = args - # def __str__(self): - # return '{}[{}:{}][{}]'.format( - # self.callname, self.lineno, self.lineno_end, - # self.doclineno) + def __str__(self): + return '{}[{}:{}][{}]'.format( + self.callname, self.lineno, self.lineno_end, + self.doclineno) class TopLevelVisitor(ast.NodeVisitor): @@ -633,7 +634,7 @@ def foo(): return lineno -def parse_calldefs(source=None, fpath=None): +def parse_static_calldefs(source=None, fpath=None): """ Statically finds top-level callable functions and methods in python source @@ -642,13 +643,15 @@ def parse_calldefs(source=None, fpath=None): fpath (str): filepath to read if source is not specified Returns: - dict(str, CallDefNode): map of callnames to tuples with def info + Dict[str, CallDefNode]: + maping from callnames to CallDefNodes, which contain + info about the item with the doctest. 
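To see the renamed `parse_static_calldefs` together with the re-enabled `CallDefNode.__str__`, a short sketch based on the doctest in this hunk:

    from xdoctest import static_analysis
    fpath = static_analysis.__file__.replace('.pyc', '.py')
    calldefs = static_analysis.parse_static_calldefs(fpath=fpath)
    node = calldefs['parse_static_calldefs']
    # __str__ now renders "callname[lineno:lineno_end][doclineno]"
    print(str(node))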
Example: >>> from xdoctest import static_analysis >>> fpath = static_analysis.__file__.replace('.pyc', '.py') - >>> calldefs = parse_calldefs(fpath=fpath) - >>> assert 'parse_calldefs' in calldefs + >>> calldefs = parse_static_calldefs(fpath=fpath) + >>> assert 'parse_static_calldefs' in calldefs """ if six.PY2: fpath = fpath.replace('.pyc', '.py') @@ -675,6 +678,14 @@ def parse_calldefs(source=None, fpath=None): raise +def parse_calldefs(source=None, fpath=None): + import warnings + warnings.warn(( + 'parse_calldefs is deprecated ' + 'use parse_static_calldefs instead'), DeprecationWarning) + return parse_static_calldefs(source=source, fpath=fpath) + + def _parse_static_node_value(node): """ Extract a constant value from a node if possible diff --git a/xdoctest/utils/util_notebook.py b/xdoctest/utils/util_notebook.py new file mode 100644 index 00000000..b39ba6e8 --- /dev/null +++ b/xdoctest/utils/util_notebook.py @@ -0,0 +1,288 @@ +""" +Utilities for handling Jupyter / IPython notebooks + +This code is copied and modified from nbimporter +(https://github.com/grst/nbimporter/blob/master/nbimporter.py) which is not +actively maintained (otherwise we would use it as a dependency). + +Note that using this behavior is very much discouraged, it would be far better +if you maintained your reusable code in separate python modules. See +https://github.com/grst/nbimporter for reasons. + +---- + +Allow for importing of IPython Notebooks as modules from Jupyter v4. + +Updated from module collated here: +https://github.com/adrn/ipython/blob/master/examples/Notebook/Importing%20Notebooks.ipynb + +Importing from a notebook is different from a module: because one +typically keeps many computations and tests besides exportable defs, +here we only run code which either defines a function or a class, or +imports code from other modules and notebooks. This behaviour can be +disabled by setting NotebookLoader.default_options['only_defs'] = False. + +Furthermore, in order to provide per-notebook initialisation, if a +special function __nbinit__() is defined in the notebook, it will be +executed the first time an import statement is. This behaviour can be +disabled by setting NotebookLoader.default_options['run_nbinit'] = False. + +Finally, you can set the encoding of the notebooks with +NotebookLoader.default_options['encoding']. The default is 'utf-8'. +""" + +import io +import os +import sys +import types +import ast +from os.path import basename, dirname + + +def _find_notebook(fullname, path=None): + """ Find a notebook, given its fully qualified name and an optional path + + This turns "foo.bar" into "foo/bar.ipynb" + and tries turning "Foo_Bar" into "Foo Bar" if Foo_Bar + does not exist. + """ + name = fullname.rsplit('.', 1)[-1] + if not path: + path = [''] + for d in path: + nb_path = os.path.join(d, name + ".ipynb") + if os.path.isfile(nb_path): + return nb_path + # let import Notebook_Name find "Notebook Name.ipynb" + nb_path = nb_path.replace("_", " ") + if os.path.isfile(nb_path): + return nb_path + + +class CellDeleter(ast.NodeTransformer): + """ Removes all nodes from an AST which are not suitable + for exporting out of a notebook. """ + def visit(self, node): + """ Visit a node. """ + if node.__class__.__name__ in ['Module', 'FunctionDef', 'ClassDef', + 'Import', 'ImportFrom']: + return node + return None + + +class NotebookLoader(object): + """ Module Loader for Jupyter Notebooks. 
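The `CellDeleter` transformer above is what implements the `only_defs` behaviour described in the module docstring; a standalone sketch on a toy source string shows what survives the filter:

    import ast
    from xdoctest.utils.util_notebook import CellDeleter
    source = 'x = 1\ndef foo():\n    return "bar"\n'
    tree = CellDeleter().generic_visit(ast.parse(source))
    # Only the function definition is kept; the top-level assignment is dropped.
    print([type(node).__name__ for node in tree.body])  # ['FunctionDef']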
""" + + default_options = { + 'only_defs': False, + 'run_nbinit': True, + 'encoding': 'utf-8' + } + + def __init__(self, path=None): + from IPython.core.interactiveshell import InteractiveShell + self.shell = InteractiveShell.instance() + self.path = path + self.options = self.default_options.copy() + + def load_module(self, fullname=None, fpath=None): + """import a notebook as a module""" + from IPython import get_ipython + import nbformat + if fpath is None: + fpath = _find_notebook(fullname, self.path) + + # load the notebook object + nb_version = nbformat.current_nbformat + + with io.open(fpath, 'r', encoding=self.options['encoding']) as f: + nb = nbformat.read(f, nb_version) + + # create the module and add it to sys.modules + # if name in sys.modules: + # return sys.modules[name] + mod = types.ModuleType(fullname) + mod.__file__ = fpath + mod.__loader__ = self + mod.__dict__['get_ipython'] = get_ipython + + # Only do something if it's a python notebook + # if nb.metadata.kernelspec.language != 'python': + # print("Ignoring '%s': not a python notebook." % fpath) + # return mod + + # print("Importing Jupyter notebook from %s" % fpath) + sys.modules[fullname] = mod + + # extra work to ensure that magics that would affect the user_ns + # actually affect the notebook module's ns + save_user_ns = self.shell.user_ns + self.shell.user_ns = mod.__dict__ + + try: + deleter = CellDeleter() + for cell in filter(lambda c: c.cell_type == 'code', nb.cells): + # transform the input into executable Python + code = self.shell.input_transformer_manager.transform_cell(cell.source) + if self.options['only_defs']: + # Remove anything that isn't a def or a class + tree = deleter.generic_visit(ast.parse(code)) + else: + tree = ast.parse(code) + # run the code in the module + codeobj = compile(tree, filename=fpath, mode='exec') + exec(codeobj, mod.__dict__) + finally: + self.shell.user_ns = save_user_ns + + # Run any initialisation if available, but only once + if self.options['run_nbinit'] and '__nbinit_done__' not in mod.__dict__: + try: + mod.__nbinit__() + mod.__nbinit_done__ = True + except (KeyError, AttributeError): + pass + + return mod + + +def import_notebook_from_path(ipynb_fpath, only_defs=False): + """ + Import an IPython notebook as a module from a full path and try to maintain + clean sys.path variables. 
+ + Args: + ipynb_fpath (str | Path): path to the ipython notebook file to import + only_defs (bool, default=False): if True ignores all non-definition + statements + + Example: + >>> # xdoctest: +REQUIRES(PY3, module:IPython, module:nbconvert) + >>> from xdoctest import utils + >>> from os.path import join + >>> self = utils.TempDir() + >>> dpath = self.ensure() + >>> ipynb_fpath = join(dpath, 'test_import_notebook.ipydb') + >>> cells = [ + >>> utils.codeblock( + >>> ''' + >>> def foo(): + >>> return 'bar' + >>> '''), + >>> utils.codeblock( + >>> ''' + >>> x = 1 + >>> ''') + >>> ] + >>> _make_test_notebook_fpath(ipynb_fpath, cells) + >>> module = import_notebook_from_path(ipynb_fpath) + >>> assert module.foo() == 'bar' + >>> assert module.x == 1 + """ + ipynb_fname = basename(ipynb_fpath) + fname_noext = ipynb_fname.rsplit('.', 1)[0] + ipynb_modname = fname_noext.replace(' ', '_') + + # hack around the importlib machinery + loader = NotebookLoader() + loader.options['only_defs'] = only_defs + module = loader.load_module(ipynb_modname, ipynb_fpath) + return module + + +def execute_notebook(ipynb_fpath, timeout=None, verbose=None): + """ + Execute an IPython notebook in a separate kernel + + Args: + ipynb_fpath (str | Path): path to the ipython notebook file to import + + Returns: + nb : NotebookNode + The executed notebook. + resources : dictionary + Additional resources used in the conversion process. + + Example: + >>> # xdoctest: +REQUIRES(PY3, module:IPython, module:nbconvert) + >>> from xdoctest import utils + >>> from os.path import join + >>> self = utils.TempDir() + >>> dpath = self.ensure() + >>> ipynb_fpath = join(dpath, 'hello_world.ipydb') + >>> _make_test_notebook_fpath(ipynb_fpath, [utils.codeblock( + >>> ''' + >>> print('hello world') + >>> ''')]) + >>> nb, resources = execute_notebook(ipynb_fpath, verbose=3) + >>> print('resources = {!r}'.format(resources)) + >>> print('nb = {!r}'.format(nb)) + >>> for cell in nb['cells']: + >>> if len(cell['outputs']) != 1: + >>> import warnings + >>> warnings.warn('expected an output, is this the issue ' + >>> 'described [here](https://github.com/nteract/papermill/issues/426)?') + + """ + import nbformat + import logging + from nbconvert.preprocessors import ExecutePreprocessor + + dpath = dirname(ipynb_fpath) + ep = ExecutePreprocessor(timeout=timeout) + if verbose is None: + verbose = 0 + + if verbose > 1: + print('executing notebook in dpath = {!r}'.format(dpath)) + ep.log.setLevel(logging.DEBUG) + elif verbose > 0: + ep.log.setLevel(logging.INFO) + + with open(ipynb_fpath, 'r+') as file: + nb = nbformat.read(file, as_version=nbformat.NO_CONVERT) + nb, resources = ep.preprocess(nb, {'metadata': {'path': dpath}}) + # from nbconvert.preprocessors import executenb + # nb, resources = executenb(nb, cwd=dpath) + return nb, resources + + +def _make_test_notebook_fpath(fpath, cell_sources): + """ + Helper for testing + + Args: + fpath (str): file to write notebook to + cell_sources (List[str]): list of python code blocks + + References: + https://stackoverflow.com/questions/38193878/create-notebook-from-code + https://gist.github.com/fperez/9716279 + """ + import nbformat as nbf + import json + import jupyter_client.kernelspec + # TODO: is there an API to generate kernelspec json correctly? 
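Beyond the doctest above, `execute_notebook` can be used to run a notebook out-of-process and inspect what each cell printed. A hedged sketch (the notebook path is hypothetical; requires nbformat and nbconvert):

    from xdoctest.utils.util_notebook import execute_notebook
    nb, resources = execute_notebook('analysis.ipynb', timeout=60, verbose=1)
    for cell in nb['cells']:
        for output in cell['outputs']:
            # Stream outputs (e.g. from print) carry their text under 'text'.
            print(output.get('text', ''))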
+ kernel_name = jupyter_client.kernelspec.NATIVE_KERNEL_NAME + spec = jupyter_client.kernelspec.get_kernel_spec(kernel_name) + metadata = {'kernelspec': { + 'name': kernel_name, + 'display_name': spec.display_name, + 'language': spec.language, + }} + # Use nbformat API to create notebook structure and cell json + nb = nbf.v4.new_notebook(metadata=metadata) + for source in cell_sources: + nb['cells'].append(nbf.v4.new_code_cell(source)) + with open(fpath, 'w') as file: + json.dump(nb, file) + return fpath + + +if __name__ == '__main__': + """ + CommandLine: + python ~/code/xdoctest/xdoctest/utils/util_notebook.py all + """ + import xdoctest + xdoctest.doctest_module(__file__) diff --git a/xdoctest/utils/util_str.py b/xdoctest/utils/util_str.py index 22c9086c..3e107405 100644 --- a/xdoctest/utils/util_str.py +++ b/xdoctest/utils/util_str.py @@ -55,11 +55,20 @@ def color_text(text, color): If pygments is not installed plain text is returned. Example: + >>> import sys + >>> if sys.platform.startswith('win32'): + >>> import pytest + >>> pytest.skip() >>> text = 'raw text' >>> from xdoctest import utils - >>> if utils.modname_to_modpath('pygments'): + >>> from xdoctest.utils import util_str + >>> if utils.modname_to_modpath('pygments') and not util_str.NO_COLOR: >>> # Colors text only if pygments is installed - >>> ansi_text = utils.ensure_unicode(color_text(text, 'red')) + >>> import pygments + >>> print('pygments = {!r}'.format(pygments)) + >>> ansi_text1 = color_text(text, 'red') + >>> print('ansi_text1 = {!r}'.format(ansi_text1)) + >>> ansi_text = utils.ensure_unicode(ansi_text1) >>> prefix = utils.ensure_unicode('\x1b[31') >>> print('prefix = {!r}'.format(prefix)) >>> print('ansi_text = {!r}'.format(ansi_text)) @@ -84,17 +93,15 @@ def color_text(text, color): except ImportError as ex: import warnings warnings.warn('os is win32 and colorma is not installed {!r}'.format(ex)) - pass import os if os.environ.get('XDOC_WIN32_COLORS', 'False') == 'False': # hack: dont color on windows by default, but do init colorama return text - try: ansi_text = pygments.console.colorize(color, text) except KeyError: import warnings - warnings.warn('unable to fine color: {!r}'.format(color)) + warnings.warn('unable to find color: {!r}'.format(color)) return text except Exception as ex: import warnings @@ -187,6 +194,10 @@ def highlight_code(text, lexer_name='python', **kwargs): 'cxx': 'cpp', 'c': 'cpp', }.get(lexer_name.replace('.', ''), lexer_name) + import os + if os.environ.get('XDOC_WIN32_COLORS', 'False') == 'False': + # hack: dont color on windows by default, but do init colorama + return text try: import pygments import pygments.lexers
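Finally, a sketch exercising the colour handling touched in the last hunk (assumes pygments is installed; XDOC_WIN32_COLORS is the opt-in toggle for the hack that otherwise returns plain text):

    import os
    # The hack treats an unset value as 'False' and returns uncolored text in that case.
    os.environ['XDOC_WIN32_COLORS'] = 'True'
    from xdoctest.utils import util_str
    print(util_str.color_text('raw text', 'red'))
    print(util_str.highlight_code("def foo():\n    return 'bar'"))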