From 4e2cf07ab879a7d0bfce24aaf00594976fb9ffed Mon Sep 17 00:00:00 2001 From: Anthony Scopatz Date: Sat, 7 May 2011 01:01:17 -0500 Subject: [PATCH] Added some sphinx extentions from numpy and ipython. --- cython/nucname.pyx | 14 +- docs/sphinxext/LICENSE.txt | 141 +++ docs/sphinxext/__init__.py | 1 + docs/sphinxext/apigen.py | 427 +++++++++ docs/sphinxext/comment_eater.py | 158 ++++ docs/sphinxext/compiler_unparse.py | 860 ++++++++++++++++++ docs/sphinxext/docscrape.py | 500 ++++++++++ docs/sphinxext/docscrape_sphinx.py | 227 +++++ docs/sphinxext/inheritance_diagram.py | 407 +++++++++ .../sphinxext/ipython_console_highlighting.py | 114 +++ docs/sphinxext/ipython_directive.py | 635 +++++++++++++ docs/sphinxext/numpydoc.py | 169 ++++ docs/sphinxext/phantom_import.py | 162 ++++ docs/sphinxext/plot_directive.py | 636 +++++++++++++ docs/sphinxext/setup.py | 31 + docs/sphinxext/traitsdoc.py | 140 +++ 16 files changed, 4617 insertions(+), 5 deletions(-) create mode 100644 docs/sphinxext/LICENSE.txt create mode 100644 docs/sphinxext/__init__.py create mode 100644 docs/sphinxext/apigen.py create mode 100644 docs/sphinxext/comment_eater.py create mode 100644 docs/sphinxext/compiler_unparse.py create mode 100644 docs/sphinxext/docscrape.py create mode 100644 docs/sphinxext/docscrape_sphinx.py create mode 100644 docs/sphinxext/inheritance_diagram.py create mode 100644 docs/sphinxext/ipython_console_highlighting.py create mode 100644 docs/sphinxext/ipython_directive.py create mode 100644 docs/sphinxext/numpydoc.py create mode 100644 docs/sphinxext/phantom_import.py create mode 100644 docs/sphinxext/plot_directive.py create mode 100644 docs/sphinxext/setup.py create mode 100644 docs/sphinxext/traitsdoc.py diff --git a/cython/nucname.pyx b/cython/nucname.pyx index b6d53118f7..d539c57b75 100644 --- a/cython/nucname.pyx +++ b/cython/nucname.pyx @@ -85,11 +85,15 @@ def current_form(nuc): def zzaaam(nuc): """Converts a nuclide to its zzaaam (int) form. - Args: - * nuc (int or str): Input nuclide. - - Returns: - * newnuc (int): Output nuclide in zzaaam form. + Parameters + ---------- + nuc : int or str + Input nuclide. + + Returns + ------- + newnuc : int + Output nuclide in zzaaam form. """ if isinstance(nuc, basestring): diff --git a/docs/sphinxext/LICENSE.txt b/docs/sphinxext/LICENSE.txt new file mode 100644 index 0000000000..c569c8779f --- /dev/null +++ b/docs/sphinxext/LICENSE.txt @@ -0,0 +1,141 @@ +------------------------------------------------------------------------------- + The files + - ipython_console_highlighting.py + - ipython_directive.py + have the following license: + + The IPython licensing terms + +IPython is licensed under the terms of the Modified BSD License (also known as +New or Revised BSD), as follows: + +Copyright (c) 2008-2010, IPython Development Team +Copyright (c) 2001-2007, Fernando Perez. +Copyright (c) 2001, Janko Hauser +Copyright (c) 2001, Nathaniel Gray + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. 
+ +Neither the name of the IPython Development Team nor the names of its +contributors may be used to endorse or promote products derived from this +software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +------------------------------------------------------------------------------- + The files + - numpydoc.py + - autosummary.py + - autosummary_generate.py + - docscrape.py + - docscrape_sphinx.py + - phantom_import.py + have the following license: + +Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------- + The files + - compiler_unparse.py + - comment_eater.py + - traitsdoc.py + have the following license: + +This software is OSI Certified Open Source Software. +OSI Certified is a certification mark of the Open Source Initiative. + +Copyright (c) 2006, Enthought, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Enthought, Inc. nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +------------------------------------------------------------------------------- + The files + - only_directives.py + - plot_directive.py + originate from Matplotlib (http://matplotlib.sf.net/) which has + the following license: + +Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved. + +1. This LICENSE AGREEMENT is between John D. Hunter (“JDH”), and the Individual or Organization (“Licensee”) accessing and otherwise using matplotlib software in source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH’s License Agreement and JDH’s notice of copyright, i.e., “Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved” are retained in matplotlib 0.98.3 alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3. + +4. JDH is making matplotlib 0.98.3 available to Licensee on an “AS IS” basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + +5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement. 
+ diff --git a/docs/sphinxext/__init__.py b/docs/sphinxext/__init__.py new file mode 100644 index 0000000000..ae9073bc41 --- /dev/null +++ b/docs/sphinxext/__init__.py @@ -0,0 +1 @@ +from numpydoc import setup diff --git a/docs/sphinxext/apigen.py b/docs/sphinxext/apigen.py new file mode 100644 index 0000000000..12374096ec --- /dev/null +++ b/docs/sphinxext/apigen.py @@ -0,0 +1,427 @@ +"""Attempt to generate templates for module reference with Sphinx + +XXX - we exclude extension modules + +To include extension modules, first identify them as valid in the +``_uri2path`` method, then handle them in the ``_parse_module`` script. + +We get functions and classes by parsing the text of .py files. +Alternatively we could import the modules for discovery, and we'd have +to do that for extension modules. This would involve changing the +``_parse_module`` method to work via import and introspection, and +might involve changing ``discover_modules`` (which determines which +files are modules, and therefore which module URIs will be passed to +``_parse_module``). + +NOTE: this is a modified version of a script originally shipped with the +PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed +project.""" + +# Stdlib imports +import os +import re + +# Functions and classes +class ApiDocWriter(object): + ''' Class for automatic detection and parsing of API docs + to Sphinx-parsable reST format''' + + # only separating first two levels + rst_section_levels = ['*', '=', '-', '~', '^'] + + def __init__(self, + package_name, + rst_extension='.rst', + package_skip_patterns=None, + module_skip_patterns=None, + ): + ''' Initialize package for parsing + + Parameters + ---------- + package_name : string + Name of the top-level package. *package_name* must be the + name of an importable package + rst_extension : string, optional + Extension for reST files, default '.rst' + package_skip_patterns : None or sequence of {strings, regexps} + Sequence of strings giving URIs of packages to be excluded + Operates on the package path, starting at (including) the + first dot in the package path, after *package_name* - so, + if *package_name* is ``sphinx``, then ``sphinx.util`` will + result in ``.util`` being passed for earching by these + regexps. If is None, gives default. Default is: + ['\.tests$'] + module_skip_patterns : None or sequence + Sequence of strings giving URIs of modules to be excluded + Operates on the module name including preceding URI path, + back to the first dot after *package_name*. For example + ``sphinx.util.console`` results in the string to search of + ``.util.console`` + If is None, gives default. 
Default is: + ['\.setup$', '\._'] + ''' + if package_skip_patterns is None: + package_skip_patterns = ['\\.tests$'] + if module_skip_patterns is None: + module_skip_patterns = ['\\.setup$', '\\._'] + self.package_name = package_name + self.rst_extension = rst_extension + self.package_skip_patterns = package_skip_patterns + self.module_skip_patterns = module_skip_patterns + + def get_package_name(self): + return self._package_name + + def set_package_name(self, package_name): + ''' Set package_name + + >>> docwriter = ApiDocWriter('sphinx') + >>> import sphinx + >>> docwriter.root_path == sphinx.__path__[0] + True + >>> docwriter.package_name = 'docutils' + >>> import docutils + >>> docwriter.root_path == docutils.__path__[0] + True + ''' + # It's also possible to imagine caching the module parsing here + self._package_name = package_name + self.root_module = __import__(package_name) + self.root_path = self.root_module.__path__[0] + self.written_modules = None + + package_name = property(get_package_name, set_package_name, None, + 'get/set package_name') + + def _get_object_name(self, line): + ''' Get second token in line + >>> docwriter = ApiDocWriter('sphinx') + >>> docwriter._get_object_name(" def func(): ") + 'func' + >>> docwriter._get_object_name(" class Klass(object): ") + 'Klass' + >>> docwriter._get_object_name(" class Klass: ") + 'Klass' + ''' + name = line.split()[1].split('(')[0].strip() + # in case we have classes which are not derived from object + # ie. old style classes + return name.rstrip(':') + + def _uri2path(self, uri): + ''' Convert uri to absolute filepath + + Parameters + ---------- + uri : string + URI of python module to return path for + + Returns + ------- + path : None or string + Returns None if there is no valid path for this URI + Otherwise returns absolute file system path for URI + + Examples + -------- + >>> docwriter = ApiDocWriter('sphinx') + >>> import sphinx + >>> modpath = sphinx.__path__[0] + >>> res = docwriter._uri2path('sphinx.builder') + >>> res == os.path.join(modpath, 'builder.py') + True + >>> res = docwriter._uri2path('sphinx') + >>> res == os.path.join(modpath, '__init__.py') + True + >>> docwriter._uri2path('sphinx.does_not_exist') + + ''' + if uri == self.package_name: + return os.path.join(self.root_path, '__init__.py') + path = uri.replace('.', os.path.sep) + path = path.replace(self.package_name + os.path.sep, '') + path = os.path.join(self.root_path, path) + # XXX maybe check for extensions as well? + if os.path.exists(path + '.py'): # file + path += '.py' + elif os.path.exists(os.path.join(path, '__init__.py')): + path = os.path.join(path, '__init__.py') + else: + return None + return path + + def _path2uri(self, dirpath): + ''' Convert directory path to uri ''' + relpath = dirpath.replace(self.root_path, self.package_name) + if relpath.startswith(os.path.sep): + relpath = relpath[1:] + return relpath.replace(os.path.sep, '.') + + def _parse_module(self, uri): + ''' Parse module defined in *uri* ''' + filename = self._uri2path(uri) + if filename is None: + # nothing that we could handle here. 
+ return ([],[]) + f = open(filename, 'rt') + functions, classes = self._parse_lines(f) + f.close() + return functions, classes + + def _parse_lines(self, linesource): + ''' Parse lines of text for functions and classes ''' + functions = [] + classes = [] + for line in linesource: + if line.startswith('def ') and line.count('('): + # exclude private stuff + name = self._get_object_name(line) + if not name.startswith('_'): + functions.append(name) + elif line.startswith('class '): + # exclude private stuff + name = self._get_object_name(line) + if not name.startswith('_'): + classes.append(name) + else: + pass + functions.sort() + classes.sort() + return functions, classes + + def generate_api_doc(self, uri): + '''Make autodoc documentation template string for a module + + Parameters + ---------- + uri : string + python location of module - e.g 'sphinx.builder' + + Returns + ------- + S : string + Contents of API doc + ''' + # get the names of all classes and functions + functions, classes = self._parse_module(uri) + if not len(functions) and not len(classes): + print 'WARNING: Empty -',uri # dbg + return '' + + # Make a shorter version of the uri that omits the package name for + # titles + uri_short = re.sub(r'^%s\.' % self.package_name,'',uri) + + ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' + + chap_title = uri_short + ad += (chap_title+'\n'+ self.rst_section_levels[1] * len(chap_title) + + '\n\n') + + # Set the chapter title to read 'module' for all modules except for the + # main packages + if '.' in uri: + title = 'Module: :mod:`' + uri_short + '`' + else: + title = ':mod:`' + uri_short + '`' + ad += title + '\n' + self.rst_section_levels[2] * len(title) + + if len(classes): + ad += '\nInheritance diagram for ``%s``:\n\n' % uri + ad += '.. inheritance-diagram:: %s \n' % uri + ad += ' :parts: 3\n' + + ad += '\n.. automodule:: ' + uri + '\n' + ad += '\n.. currentmodule:: ' + uri + '\n' + multi_class = len(classes) > 1 + multi_fx = len(functions) > 1 + if multi_class: + ad += '\n' + 'Classes' + '\n' + \ + self.rst_section_levels[2] * 7 + '\n' + elif len(classes) and multi_fx: + ad += '\n' + 'Class' + '\n' + \ + self.rst_section_levels[2] * 5 + '\n' + for c in classes: + ad += '\n:class:`' + c + '`\n' \ + + self.rst_section_levels[multi_class + 2 ] * \ + (len(c)+9) + '\n\n' + ad += '\n.. autoclass:: ' + c + '\n' + # must NOT exclude from index to keep cross-refs working + ad += ' :members:\n' \ + ' :undoc-members:\n' \ + ' :show-inheritance:\n' \ + ' :inherited-members:\n' \ + '\n' \ + ' .. automethod:: __init__\n' + if multi_fx: + ad += '\n' + 'Functions' + '\n' + \ + self.rst_section_levels[2] * 9 + '\n\n' + elif len(functions) and multi_class: + ad += '\n' + 'Function' + '\n' + \ + self.rst_section_levels[2] * 8 + '\n\n' + for f in functions: + # must NOT exclude from index to keep cross-refs working + ad += '\n.. autofunction:: ' + uri + '.' 
+ f + '\n\n' + return ad + + def _survives_exclude(self, matchstr, match_type): + ''' Returns True if *matchstr* does not match patterns + + ``self.package_name`` removed from front of string if present + + Examples + -------- + >>> dw = ApiDocWriter('sphinx') + >>> dw._survives_exclude('sphinx.okpkg', 'package') + True + >>> dw.package_skip_patterns.append('^\\.badpkg$') + >>> dw._survives_exclude('sphinx.badpkg', 'package') + False + >>> dw._survives_exclude('sphinx.badpkg', 'module') + True + >>> dw._survives_exclude('sphinx.badmod', 'module') + True + >>> dw.module_skip_patterns.append('^\\.badmod$') + >>> dw._survives_exclude('sphinx.badmod', 'module') + False + ''' + if match_type == 'module': + patterns = self.module_skip_patterns + elif match_type == 'package': + patterns = self.package_skip_patterns + else: + raise ValueError('Cannot interpret match type "%s"' + % match_type) + # Match to URI without package name + L = len(self.package_name) + if matchstr[:L] == self.package_name: + matchstr = matchstr[L:] + for pat in patterns: + try: + pat.search + except AttributeError: + pat = re.compile(pat) + if pat.search(matchstr): + return False + return True + + def discover_modules(self): + ''' Return module sequence discovered from ``self.package_name`` + + + Parameters + ---------- + None + + Returns + ------- + mods : sequence + Sequence of module names within ``self.package_name`` + + Examples + -------- + >>> dw = ApiDocWriter('sphinx') + >>> mods = dw.discover_modules() + >>> 'sphinx.util' in mods + True + >>> dw.package_skip_patterns.append('\.util$') + >>> 'sphinx.util' in dw.discover_modules() + False + >>> + ''' + modules = [self.package_name] + # raw directory parsing + for dirpath, dirnames, filenames in os.walk(self.root_path): + # Check directory names for packages + root_uri = self._path2uri(os.path.join(self.root_path, + dirpath)) + for dirname in dirnames[:]: # copy list - we modify inplace + package_uri = '.'.join((root_uri, dirname)) + if (self._uri2path(package_uri) and + self._survives_exclude(package_uri, 'package')): + modules.append(package_uri) + else: + dirnames.remove(dirname) + # Check filenames for modules + for filename in filenames: + module_name = filename[:-3] + module_uri = '.'.join((root_uri, module_name)) + if (self._uri2path(module_uri) and + self._survives_exclude(module_uri, 'module')): + modules.append(module_uri) + return sorted(modules) + + def write_modules_api(self, modules,outdir): + # write the list + written_modules = [] + for m in modules: + api_str = self.generate_api_doc(m) + if not api_str: + continue + # write out to file + outfile = os.path.join(outdir, + m + self.rst_extension) + fileobj = open(outfile, 'wt') + fileobj.write(api_str) + fileobj.close() + written_modules.append(m) + self.written_modules = written_modules + + def write_api_docs(self, outdir): + """Generate API reST files. 
+ + Parameters + ---------- + outdir : string + Directory name in which to store files + We create automatic filenames for each module + + Returns + ------- + None + + Notes + ----- + Sets self.written_modules to list of written modules + """ + if not os.path.exists(outdir): + os.mkdir(outdir) + # compose list of modules + modules = self.discover_modules() + self.write_modules_api(modules,outdir) + + def write_index(self, outdir, froot='gen', relative_to=None): + """Make a reST API index file from written files + + Parameters + ---------- + path : string + Filename to write index to + outdir : string + Directory to which to write generated index file + froot : string, optional + root (filename without extension) of filename to write to + Defaults to 'gen'. We add ``self.rst_extension``. + relative_to : string + path to which written filenames are relative. This + component of the written file path will be removed from + outdir, in the generated index. Default is None, meaning, + leave path as it is. + """ + if self.written_modules is None: + raise ValueError('No modules written') + # Get full filename path + path = os.path.join(outdir, froot+self.rst_extension) + # Path written into index is relative to rootpath + if relative_to is not None: + relpath = outdir.replace(relative_to + os.path.sep, '') + else: + relpath = outdir + idx = open(path,'wt') + w = idx.write + w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n') + w('.. toctree::\n\n') + for f in self.written_modules: + w(' %s\n' % os.path.join(relpath,f)) + idx.close() diff --git a/docs/sphinxext/comment_eater.py b/docs/sphinxext/comment_eater.py new file mode 100644 index 0000000000..e11eea9021 --- /dev/null +++ b/docs/sphinxext/comment_eater.py @@ -0,0 +1,158 @@ +from cStringIO import StringIO +import compiler +import inspect +import textwrap +import tokenize + +from compiler_unparse import unparse + + +class Comment(object): + """ A comment block. + """ + is_comment = True + def __init__(self, start_lineno, end_lineno, text): + # int : The first line number in the block. 1-indexed. + self.start_lineno = start_lineno + # int : The last line number. Inclusive! + self.end_lineno = end_lineno + # str : The text block including '#' character but not any leading spaces. + self.text = text + + def add(self, string, start, end, line): + """ Add a new comment line. + """ + self.start_lineno = min(self.start_lineno, start[0]) + self.end_lineno = max(self.end_lineno, end[0]) + self.text += string + + def __repr__(self): + return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno, + self.end_lineno, self.text) + + +class NonComment(object): + """ A non-comment block of code. + """ + is_comment = False + def __init__(self, start_lineno, end_lineno): + self.start_lineno = start_lineno + self.end_lineno = end_lineno + + def add(self, string, start, end, line): + """ Add lines to the block. + """ + if string.strip(): + # Only add if not entirely whitespace. + self.start_lineno = min(self.start_lineno, start[0]) + self.end_lineno = max(self.end_lineno, end[0]) + + def __repr__(self): + return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno, + self.end_lineno) + + +class CommentBlocker(object): + """ Pull out contiguous comment blocks. + """ + def __init__(self): + # Start with a dummy. + self.current_block = NonComment(0, 0) + + # All of the blocks seen so far. + self.blocks = [] + + # The index mapping lines of code to their associated comment blocks. 
+ self.index = {} + + def process_file(self, file): + """ Process a file object. + """ + for token in tokenize.generate_tokens(file.next): + self.process_token(*token) + self.make_index() + + def process_token(self, kind, string, start, end, line): + """ Process a single token. + """ + if self.current_block.is_comment: + if kind == tokenize.COMMENT: + self.current_block.add(string, start, end, line) + else: + self.new_noncomment(start[0], end[0]) + else: + if kind == tokenize.COMMENT: + self.new_comment(string, start, end, line) + else: + self.current_block.add(string, start, end, line) + + def new_noncomment(self, start_lineno, end_lineno): + """ We are transitioning from a noncomment to a comment. + """ + block = NonComment(start_lineno, end_lineno) + self.blocks.append(block) + self.current_block = block + + def new_comment(self, string, start, end, line): + """ Possibly add a new comment. + + Only adds a new comment if this comment is the only thing on the line. + Otherwise, it extends the noncomment block. + """ + prefix = line[:start[1]] + if prefix.strip(): + # Oops! Trailing comment, not a comment block. + self.current_block.add(string, start, end, line) + else: + # A comment block. + block = Comment(start[0], end[0], string) + self.blocks.append(block) + self.current_block = block + + def make_index(self): + """ Make the index mapping lines of actual code to their associated + prefix comments. + """ + for prev, block in zip(self.blocks[:-1], self.blocks[1:]): + if not block.is_comment: + self.index[block.start_lineno] = prev + + def search_for_comment(self, lineno, default=None): + """ Find the comment block just before the given line number. + + Returns None (or the specified default) if there is no such block. + """ + if not self.index: + self.make_index() + block = self.index.get(lineno, None) + text = getattr(block, 'text', default) + return text + + +def strip_comment_marker(text): + """ Strip # markers at the front of a block of comment text. + """ + lines = [] + for line in text.splitlines(): + lines.append(line.lstrip('#')) + text = textwrap.dedent('\n'.join(lines)) + return text + + +def get_class_traits(klass): + """ Yield all of the documentation for trait definitions on a class object. + """ + # FIXME: gracefully handle errors here or in the caller? + source = inspect.getsource(klass) + cb = CommentBlocker() + cb.process_file(StringIO(source)) + mod_ast = compiler.parse(source) + class_ast = mod_ast.node.nodes[0] + for node in class_ast.code.nodes: + # FIXME: handle other kinds of assignments? + if isinstance(node, compiler.ast.Assign): + name = node.nodes[0].name + rhs = unparse(node.expr).strip() + doc = strip_comment_marker(cb.search_for_comment(node.lineno, default='')) + yield name, rhs, doc + diff --git a/docs/sphinxext/compiler_unparse.py b/docs/sphinxext/compiler_unparse.py new file mode 100644 index 0000000000..ffcf51b353 --- /dev/null +++ b/docs/sphinxext/compiler_unparse.py @@ -0,0 +1,860 @@ +""" Turn compiler.ast structures back into executable python code. + + The unparse method takes a compiler.ast tree and transforms it back into + valid python code. It is incomplete and currently only works for + import statements, function calls, function definitions, assignments, and + basic expressions. + + Inspired by python-2.5-svn/Demo/parser/unparse.py + + fixme: We may want to move to using _ast trees because the compiler for + them is about 6 times faster than compiler.compile. 
+""" + +import sys +import cStringIO +from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add + +def unparse(ast, single_line_functions=False): + s = cStringIO.StringIO() + UnparseCompilerAst(ast, s, single_line_functions) + return s.getvalue().lstrip() + +op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2, + 'compiler.ast.Add':1, 'compiler.ast.Sub':1 } + +class UnparseCompilerAst: + """ Methods in this class recursively traverse an AST and + output source code for the abstract syntax; original formatting + is disregarged. + """ + + ######################################################################### + # object interface. + ######################################################################### + + def __init__(self, tree, file = sys.stdout, single_line_functions=False): + """ Unparser(tree, file=sys.stdout) -> None. + + Print the source for tree to file. + """ + self.f = file + self._single_func = single_line_functions + self._do_indent = True + self._indent = 0 + self._dispatch(tree) + self._write("\n") + self.f.flush() + + ######################################################################### + # Unparser private interface. + ######################################################################### + + ### format, output, and dispatch methods ################################ + + def _fill(self, text = ""): + "Indent a piece of text, according to the current indentation level" + if self._do_indent: + self._write("\n"+" "*self._indent + text) + else: + self._write(text) + + def _write(self, text): + "Append a piece of text to the current line." + self.f.write(text) + + def _enter(self): + "Print ':', and increase the indentation." + self._write(": ") + self._indent += 1 + + def _leave(self): + "Decrease the indentation level." + self._indent -= 1 + + def _dispatch(self, tree): + "_dispatcher function, _dispatching tree type T to method _T." + if isinstance(tree, list): + for t in tree: + self._dispatch(t) + return + meth = getattr(self, "_"+tree.__class__.__name__) + if tree.__class__.__name__ == 'NoneType' and not self._do_indent: + return + meth(tree) + + + ######################################################################### + # compiler.ast unparsing methods. + # + # There should be one method per concrete grammar type. They are + # organized in alphabetical order. + ######################################################################### + + def _Add(self, t): + self.__binary_op(t, '+') + + def _And(self, t): + self._write(" (") + for i, node in enumerate(t.nodes): + self._dispatch(node) + if i != len(t.nodes)-1: + self._write(") and (") + self._write(")") + + def _AssAttr(self, t): + """ Handle assigning an attribute of an object + """ + self._dispatch(t.expr) + self._write('.'+t.attrname) + + def _Assign(self, t): + """ Expression Assignment such as "a = 1". + + This only handles assignment in expressions. Keyword assignment + is handled separately. + """ + self._fill() + for target in t.nodes: + self._dispatch(target) + self._write(" = ") + self._dispatch(t.expr) + if not self._do_indent: + self._write('; ') + + def _AssName(self, t): + """ Name on left hand side of expression. + + Treat just like a name on the right side of an expression. + """ + self._Name(t) + + def _AssTuple(self, t): + """ Tuple on left hand side of an expression. + """ + + # _write each elements, separated by a comma. 
+ for element in t.nodes[:-1]: + self._dispatch(element) + self._write(", ") + + # Handle the last one without writing comma + last_element = t.nodes[-1] + self._dispatch(last_element) + + def _AugAssign(self, t): + """ +=,-=,*=,/=,**=, etc. operations + """ + + self._fill() + self._dispatch(t.node) + self._write(' '+t.op+' ') + self._dispatch(t.expr) + if not self._do_indent: + self._write(';') + + def _Bitand(self, t): + """ Bit and operation. + """ + + for i, node in enumerate(t.nodes): + self._write("(") + self._dispatch(node) + self._write(")") + if i != len(t.nodes)-1: + self._write(" & ") + + def _Bitor(self, t): + """ Bit or operation + """ + + for i, node in enumerate(t.nodes): + self._write("(") + self._dispatch(node) + self._write(")") + if i != len(t.nodes)-1: + self._write(" | ") + + def _CallFunc(self, t): + """ Function call. + """ + self._dispatch(t.node) + self._write("(") + comma = False + for e in t.args: + if comma: self._write(", ") + else: comma = True + self._dispatch(e) + if t.star_args: + if comma: self._write(", ") + else: comma = True + self._write("*") + self._dispatch(t.star_args) + if t.dstar_args: + if comma: self._write(", ") + else: comma = True + self._write("**") + self._dispatch(t.dstar_args) + self._write(")") + + def _Compare(self, t): + self._dispatch(t.expr) + for op, expr in t.ops: + self._write(" " + op + " ") + self._dispatch(expr) + + def _Const(self, t): + """ A constant value such as an integer value, 3, or a string, "hello". + """ + self._dispatch(t.value) + + def _Decorators(self, t): + """ Handle function decorators (eg. @has_units) + """ + for node in t.nodes: + self._dispatch(node) + + def _Dict(self, t): + self._write("{") + for i, (k, v) in enumerate(t.items): + self._dispatch(k) + self._write(": ") + self._dispatch(v) + if i < len(t.items)-1: + self._write(", ") + self._write("}") + + def _Discard(self, t): + """ Node for when return value is ignored such as in "foo(a)". + """ + self._fill() + self._dispatch(t.expr) + + def _Div(self, t): + self.__binary_op(t, '/') + + def _Ellipsis(self, t): + self._write("...") + + def _From(self, t): + """ Handle "from xyz import foo, bar as baz". + """ + # fixme: Are From and ImportFrom handled differently? 
+ self._fill("from ") + self._write(t.modname) + self._write(" import ") + for i, (name,asname) in enumerate(t.names): + if i != 0: + self._write(", ") + self._write(name) + if asname is not None: + self._write(" as "+asname) + + def _Function(self, t): + """ Handle function definitions + """ + if t.decorators is not None: + self._fill("@") + self._dispatch(t.decorators) + self._fill("def "+t.name + "(") + defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults) + for i, arg in enumerate(zip(t.argnames, defaults)): + self._write(arg[0]) + if arg[1] is not None: + self._write('=') + self._dispatch(arg[1]) + if i < len(t.argnames)-1: + self._write(', ') + self._write(")") + if self._single_func: + self._do_indent = False + self._enter() + self._dispatch(t.code) + self._leave() + self._do_indent = True + + def _Getattr(self, t): + """ Handle getting an attribute of an object + """ + if isinstance(t.expr, (Div, Mul, Sub, Add)): + self._write('(') + self._dispatch(t.expr) + self._write(')') + else: + self._dispatch(t.expr) + + self._write('.'+t.attrname) + + def _If(self, t): + self._fill() + + for i, (compare,code) in enumerate(t.tests): + if i == 0: + self._write("if ") + else: + self._write("elif ") + self._dispatch(compare) + self._enter() + self._fill() + self._dispatch(code) + self._leave() + self._write("\n") + + if t.else_ is not None: + self._write("else") + self._enter() + self._fill() + self._dispatch(t.else_) + self._leave() + self._write("\n") + + def _IfExp(self, t): + self._dispatch(t.then) + self._write(" if ") + self._dispatch(t.test) + + if t.else_ is not None: + self._write(" else (") + self._dispatch(t.else_) + self._write(")") + + def _Import(self, t): + """ Handle "import xyz.foo". + """ + self._fill("import ") + + for i, (name,asname) in enumerate(t.names): + if i != 0: + self._write(", ") + self._write(name) + if asname is not None: + self._write(" as "+asname) + + def _Keyword(self, t): + """ Keyword value assignment within function calls and definitions. 
+ """ + self._write(t.name) + self._write("=") + self._dispatch(t.expr) + + def _List(self, t): + self._write("[") + for i,node in enumerate(t.nodes): + self._dispatch(node) + if i < len(t.nodes)-1: + self._write(", ") + self._write("]") + + def _Module(self, t): + if t.doc is not None: + self._dispatch(t.doc) + self._dispatch(t.node) + + def _Mul(self, t): + self.__binary_op(t, '*') + + def _Name(self, t): + self._write(t.name) + + def _NoneType(self, t): + self._write("None") + + def _Not(self, t): + self._write('not (') + self._dispatch(t.expr) + self._write(')') + + def _Or(self, t): + self._write(" (") + for i, node in enumerate(t.nodes): + self._dispatch(node) + if i != len(t.nodes)-1: + self._write(") or (") + self._write(")") + + def _Pass(self, t): + self._write("pass\n") + + def _Printnl(self, t): + self._fill("print ") + if t.dest: + self._write(">> ") + self._dispatch(t.dest) + self._write(", ") + comma = False + for node in t.nodes: + if comma: self._write(', ') + else: comma = True + self._dispatch(node) + + def _Power(self, t): + self.__binary_op(t, '**') + + def _Return(self, t): + self._fill("return ") + if t.value: + if isinstance(t.value, Tuple): + text = ', '.join([ name.name for name in t.value.asList() ]) + self._write(text) + else: + self._dispatch(t.value) + if not self._do_indent: + self._write('; ') + + def _Slice(self, t): + self._dispatch(t.expr) + self._write("[") + if t.lower: + self._dispatch(t.lower) + self._write(":") + if t.upper: + self._dispatch(t.upper) + #if t.step: + # self._write(":") + # self._dispatch(t.step) + self._write("]") + + def _Sliceobj(self, t): + for i, node in enumerate(t.nodes): + if i != 0: + self._write(":") + if not (isinstance(node, Const) and node.value is None): + self._dispatch(node) + + def _Stmt(self, tree): + for node in tree.nodes: + self._dispatch(node) + + def _Sub(self, t): + self.__binary_op(t, '-') + + def _Subscript(self, t): + self._dispatch(t.expr) + self._write("[") + for i, value in enumerate(t.subs): + if i != 0: + self._write(",") + self._dispatch(value) + self._write("]") + + def _TryExcept(self, t): + self._fill("try") + self._enter() + self._dispatch(t.body) + self._leave() + + for handler in t.handlers: + self._fill('except ') + self._dispatch(handler[0]) + if handler[1] is not None: + self._write(', ') + self._dispatch(handler[1]) + self._enter() + self._dispatch(handler[2]) + self._leave() + + if t.else_: + self._fill("else") + self._enter() + self._dispatch(t.else_) + self._leave() + + def _Tuple(self, t): + + if not t.nodes: + # Empty tuple. + self._write("()") + else: + self._write("(") + + # _write each elements, separated by a comma. 
+ for element in t.nodes[:-1]: + self._dispatch(element) + self._write(", ") + + # Handle the last one without writing comma + last_element = t.nodes[-1] + self._dispatch(last_element) + + self._write(")") + + def _UnaryAdd(self, t): + self._write("+") + self._dispatch(t.expr) + + def _UnarySub(self, t): + self._write("-") + self._dispatch(t.expr) + + def _With(self, t): + self._fill('with ') + self._dispatch(t.expr) + if t.vars: + self._write(' as ') + self._dispatch(t.vars.name) + self._enter() + self._dispatch(t.body) + self._leave() + self._write('\n') + + def _int(self, t): + self._write(repr(t)) + + def __binary_op(self, t, symbol): + # Check if parenthesis are needed on left side and then dispatch + has_paren = False + left_class = str(t.left.__class__) + if (left_class in op_precedence.keys() and + op_precedence[left_class] < op_precedence[str(t.__class__)]): + has_paren = True + if has_paren: + self._write('(') + self._dispatch(t.left) + if has_paren: + self._write(')') + # Write the appropriate symbol for operator + self._write(symbol) + # Check if parenthesis are needed on the right side and then dispatch + has_paren = False + right_class = str(t.right.__class__) + if (right_class in op_precedence.keys() and + op_precedence[right_class] < op_precedence[str(t.__class__)]): + has_paren = True + if has_paren: + self._write('(') + self._dispatch(t.right) + if has_paren: + self._write(')') + + def _float(self, t): + # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001' + # We prefer str here. + self._write(str(t)) + + def _str(self, t): + self._write(repr(t)) + + def _tuple(self, t): + self._write(str(t)) + + ######################################################################### + # These are the methods from the _ast modules unparse. + # + # As our needs to handle more advanced code increase, we may want to + # modify some of the methods below so that they work for compiler.ast. + ######################################################################### + +# # stmt +# def _Expr(self, tree): +# self._fill() +# self._dispatch(tree.value) +# +# def _Import(self, t): +# self._fill("import ") +# first = True +# for a in t.names: +# if first: +# first = False +# else: +# self._write(", ") +# self._write(a.name) +# if a.asname: +# self._write(" as "+a.asname) +# +## def _ImportFrom(self, t): +## self._fill("from ") +## self._write(t.module) +## self._write(" import ") +## for i, a in enumerate(t.names): +## if i == 0: +## self._write(", ") +## self._write(a.name) +## if a.asname: +## self._write(" as "+a.asname) +## # XXX(jpe) what is level for? 
+## +# +# def _Break(self, t): +# self._fill("break") +# +# def _Continue(self, t): +# self._fill("continue") +# +# def _Delete(self, t): +# self._fill("del ") +# self._dispatch(t.targets) +# +# def _Assert(self, t): +# self._fill("assert ") +# self._dispatch(t.test) +# if t.msg: +# self._write(", ") +# self._dispatch(t.msg) +# +# def _Exec(self, t): +# self._fill("exec ") +# self._dispatch(t.body) +# if t.globals: +# self._write(" in ") +# self._dispatch(t.globals) +# if t.locals: +# self._write(", ") +# self._dispatch(t.locals) +# +# def _Print(self, t): +# self._fill("print ") +# do_comma = False +# if t.dest: +# self._write(">>") +# self._dispatch(t.dest) +# do_comma = True +# for e in t.values: +# if do_comma:self._write(", ") +# else:do_comma=True +# self._dispatch(e) +# if not t.nl: +# self._write(",") +# +# def _Global(self, t): +# self._fill("global") +# for i, n in enumerate(t.names): +# if i != 0: +# self._write(",") +# self._write(" " + n) +# +# def _Yield(self, t): +# self._fill("yield") +# if t.value: +# self._write(" (") +# self._dispatch(t.value) +# self._write(")") +# +# def _Raise(self, t): +# self._fill('raise ') +# if t.type: +# self._dispatch(t.type) +# if t.inst: +# self._write(", ") +# self._dispatch(t.inst) +# if t.tback: +# self._write(", ") +# self._dispatch(t.tback) +# +# +# def _TryFinally(self, t): +# self._fill("try") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# self._fill("finally") +# self._enter() +# self._dispatch(t.finalbody) +# self._leave() +# +# def _excepthandler(self, t): +# self._fill("except ") +# if t.type: +# self._dispatch(t.type) +# if t.name: +# self._write(", ") +# self._dispatch(t.name) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _ClassDef(self, t): +# self._write("\n") +# self._fill("class "+t.name) +# if t.bases: +# self._write("(") +# for a in t.bases: +# self._dispatch(a) +# self._write(", ") +# self._write(")") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _FunctionDef(self, t): +# self._write("\n") +# for deco in t.decorators: +# self._fill("@") +# self._dispatch(deco) +# self._fill("def "+t.name + "(") +# self._dispatch(t.args) +# self._write(")") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _For(self, t): +# self._fill("for ") +# self._dispatch(t.target) +# self._write(" in ") +# self._dispatch(t.iter) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# if t.orelse: +# self._fill("else") +# self._enter() +# self._dispatch(t.orelse) +# self._leave +# +# def _While(self, t): +# self._fill("while ") +# self._dispatch(t.test) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# if t.orelse: +# self._fill("else") +# self._enter() +# self._dispatch(t.orelse) +# self._leave +# +# # expr +# def _Str(self, tree): +# self._write(repr(tree.s)) +## +# def _Repr(self, t): +# self._write("`") +# self._dispatch(t.value) +# self._write("`") +# +# def _Num(self, t): +# self._write(repr(t.n)) +# +# def _ListComp(self, t): +# self._write("[") +# self._dispatch(t.elt) +# for gen in t.generators: +# self._dispatch(gen) +# self._write("]") +# +# def _GeneratorExp(self, t): +# self._write("(") +# self._dispatch(t.elt) +# for gen in t.generators: +# self._dispatch(gen) +# self._write(")") +# +# def _comprehension(self, t): +# self._write(" for ") +# self._dispatch(t.target) +# self._write(" in ") +# self._dispatch(t.iter) +# for if_clause in t.ifs: +# self._write(" if ") +# self._dispatch(if_clause) +# +# def _IfExp(self, t): +# 
self._dispatch(t.body) +# self._write(" if ") +# self._dispatch(t.test) +# if t.orelse: +# self._write(" else ") +# self._dispatch(t.orelse) +# +# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"} +# def _UnaryOp(self, t): +# self._write(self.unop[t.op.__class__.__name__]) +# self._write("(") +# self._dispatch(t.operand) +# self._write(")") +# +# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%", +# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", +# "FloorDiv":"//", "Pow": "**"} +# def _BinOp(self, t): +# self._write("(") +# self._dispatch(t.left) +# self._write(")" + self.binop[t.op.__class__.__name__] + "(") +# self._dispatch(t.right) +# self._write(")") +# +# boolops = {_ast.And: 'and', _ast.Or: 'or'} +# def _BoolOp(self, t): +# self._write("(") +# self._dispatch(t.values[0]) +# for v in t.values[1:]: +# self._write(" %s " % self.boolops[t.op.__class__]) +# self._dispatch(v) +# self._write(")") +# +# def _Attribute(self,t): +# self._dispatch(t.value) +# self._write(".") +# self._write(t.attr) +# +## def _Call(self, t): +## self._dispatch(t.func) +## self._write("(") +## comma = False +## for e in t.args: +## if comma: self._write(", ") +## else: comma = True +## self._dispatch(e) +## for e in t.keywords: +## if comma: self._write(", ") +## else: comma = True +## self._dispatch(e) +## if t.starargs: +## if comma: self._write(", ") +## else: comma = True +## self._write("*") +## self._dispatch(t.starargs) +## if t.kwargs: +## if comma: self._write(", ") +## else: comma = True +## self._write("**") +## self._dispatch(t.kwargs) +## self._write(")") +# +# # slice +# def _Index(self, t): +# self._dispatch(t.value) +# +# def _ExtSlice(self, t): +# for i, d in enumerate(t.dims): +# if i != 0: +# self._write(': ') +# self._dispatch(d) +# +# # others +# def _arguments(self, t): +# first = True +# nonDef = len(t.args)-len(t.defaults) +# for a in t.args[0:nonDef]: +# if first:first = False +# else: self._write(", ") +# self._dispatch(a) +# for a,d in zip(t.args[nonDef:], t.defaults): +# if first:first = False +# else: self._write(", ") +# self._dispatch(a), +# self._write("=") +# self._dispatch(d) +# if t.vararg: +# if first:first = False +# else: self._write(", ") +# self._write("*"+t.vararg) +# if t.kwarg: +# if first:first = False +# else: self._write(", ") +# self._write("**"+t.kwarg) +# +## def _keyword(self, t): +## self._write(t.arg) +## self._write("=") +## self._dispatch(t.value) +# +# def _Lambda(self, t): +# self._write("lambda ") +# self._dispatch(t.args) +# self._write(": ") +# self._dispatch(t.body) + + + diff --git a/docs/sphinxext/docscrape.py b/docs/sphinxext/docscrape.py new file mode 100644 index 0000000000..615ea11f8d --- /dev/null +++ b/docs/sphinxext/docscrape.py @@ -0,0 +1,500 @@ +"""Extract reference documentation from the NumPy source tree. + +""" + +import inspect +import textwrap +import re +import pydoc +from StringIO import StringIO +from warnings import warn + +class Reader(object): + """A line-based string reader. + + """ + def __init__(self, data): + """ + Parameters + ---------- + data : str + String with lines separated by '\n'. 
+ + """ + if isinstance(data,list): + self._str = data + else: + self._str = data.split('\n') # store string as list of lines + + self.reset() + + def __getitem__(self, n): + return self._str[n] + + def reset(self): + self._l = 0 # current line nr + + def read(self): + if not self.eof(): + out = self[self._l] + self._l += 1 + return out + else: + return '' + + def seek_next_non_empty_line(self): + for l in self[self._l:]: + if l.strip(): + break + else: + self._l += 1 + + def eof(self): + return self._l >= len(self._str) + + def read_to_condition(self, condition_func): + start = self._l + for line in self[start:]: + if condition_func(line): + return self[start:self._l] + self._l += 1 + if self.eof(): + return self[start:self._l+1] + return [] + + def read_to_next_empty_line(self): + self.seek_next_non_empty_line() + def is_empty(line): + return not line.strip() + return self.read_to_condition(is_empty) + + def read_to_next_unindented_line(self): + def is_unindented(line): + return (line.strip() and (len(line.lstrip()) == len(line))) + return self.read_to_condition(is_unindented) + + def peek(self,n=0): + if self._l + n < len(self._str): + return self[self._l + n] + else: + return '' + + def is_empty(self): + return not ''.join(self._str).strip() + + +class NumpyDocString(object): + def __init__(self, docstring, config={}): + docstring = textwrap.dedent(docstring).split('\n') + + self._doc = Reader(docstring) + self._parsed_data = { + 'Signature': '', + 'Summary': [''], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Attributes': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'Warnings': [], + 'References': '', + 'Examples': '', + 'index': {} + } + + self._parse() + + def __getitem__(self,key): + return self._parsed_data[key] + + def __setitem__(self,key,val): + if not self._parsed_data.has_key(key): + warn("Unknown section %s" % key) + else: + self._parsed_data[key] = val + + def _is_at_section(self): + self._doc.seek_next_non_empty_line() + + if self._doc.eof(): + return False + + l1 = self._doc.peek().strip() # e.g. Parameters + + if l1.startswith('.. 
index::'): + return True + + l2 = self._doc.peek(1).strip() # ---------- or ========== + return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) + + def _strip(self,doc): + i = 0 + j = 0 + for i,line in enumerate(doc): + if line.strip(): break + + for j,line in enumerate(doc[::-1]): + if line.strip(): break + + return doc[i:len(doc)-j] + + def _read_to_next_section(self): + section = self._doc.read_to_next_empty_line() + + while not self._is_at_section() and not self._doc.eof(): + if not self._doc.peek(-1).strip(): # previous line was empty + section += [''] + + section += self._doc.read_to_next_empty_line() + + return section + + def _read_sections(self): + while not self._doc.eof(): + data = self._read_to_next_section() + name = data[0].strip() + + if name.startswith('..'): # index section + yield name, data[1:] + elif len(data) < 2: + yield StopIteration + else: + yield name, self._strip(data[2:]) + + def _parse_param_list(self,content): + r = Reader(content) + params = [] + while not r.eof(): + header = r.read().strip() + if ' : ' in header: + arg_name, arg_type = header.split(' : ')[:2] + else: + arg_name, arg_type = header, '' + + desc = r.read_to_next_unindented_line() + desc = dedent_lines(desc) + + params.append((arg_name,arg_type,desc)) + + return params + + + _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" + r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) + def _parse_see_also(self, content): + """ + func_name : Descriptive text + continued text + another_func_name : Descriptive text + func_name1, func_name2, :meth:`func_name`, func_name3 + + """ + items = [] + + def parse_item_name(text): + """Match ':role:`name`' or 'name'""" + m = self._name_rgx.match(text) + if m: + g = m.groups() + if g[1] is None: + return g[3], None + else: + return g[2], g[1] + raise ValueError("%s is not a item name" % text) + + def push_item(name, rest): + if not name: + return + name, role = parse_item_name(name) + items.append((name, list(rest), role)) + del rest[:] + + current_func = None + rest = [] + + for line in content: + if not line.strip(): continue + + m = self._name_rgx.match(line) + if m and line[m.end():].strip().startswith(':'): + push_item(current_func, rest) + current_func, line = line[:m.end()], line[m.end():] + rest = [line.split(':', 1)[1].strip()] + if not rest[0]: + rest = [] + elif not line.startswith(' '): + push_item(current_func, rest) + current_func = None + if ',' in line: + for func in line.split(','): + if func.strip(): + push_item(func, []) + elif line.strip(): + current_func = line + elif current_func is not None: + rest.append(line.strip()) + push_item(current_func, rest) + return items + + def _parse_index(self, section, content): + """ + .. 
index: default + :refguide: something, else, and more + + """ + def strip_each_in(lst): + return [s.strip() for s in lst] + + out = {} + section = section.split('::') + if len(section) > 1: + out['default'] = strip_each_in(section[1].split(','))[0] + for line in content: + line = line.split(':') + if len(line) > 2: + out[line[1]] = strip_each_in(line[2].split(',')) + return out + + def _parse_summary(self): + """Grab signature (if given) and summary""" + if self._is_at_section(): + return + + summary = self._doc.read_to_next_empty_line() + summary_str = " ".join([s.strip() for s in summary]).strip() + if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): + self['Signature'] = summary_str + if not self._is_at_section(): + self['Summary'] = self._doc.read_to_next_empty_line() + else: + self['Summary'] = summary + + if not self._is_at_section(): + self['Extended Summary'] = self._read_to_next_section() + + def _parse(self): + self._doc.reset() + self._parse_summary() + + for (section,content) in self._read_sections(): + if not section.startswith('..'): + section = ' '.join([s.capitalize() for s in section.split(' ')]) + if section in ('Parameters', 'Returns', 'Raises', 'Warns', + 'Other Parameters', 'Attributes', 'Methods'): + self[section] = self._parse_param_list(content) + elif section.startswith('.. index::'): + self['index'] = self._parse_index(section, content) + elif section == 'See Also': + self['See Also'] = self._parse_see_also(content) + else: + self[section] = content + + # string conversion routines + + def _str_header(self, name, symbol='-'): + return [name, len(name)*symbol] + + def _str_indent(self, doc, indent=4): + out = [] + for line in doc: + out += [' '*indent + line] + return out + + def _str_signature(self): + if self['Signature']: + return [self['Signature'].replace('*','\*')] + [''] + else: + return [''] + + def _str_summary(self): + if self['Summary']: + return self['Summary'] + [''] + else: + return [] + + def _str_extended_summary(self): + if self['Extended Summary']: + return self['Extended Summary'] + [''] + else: + return [] + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_header(name) + for param,param_type,desc in self[name]: + out += ['%s : %s' % (param, param_type)] + out += self._str_indent(desc) + out += [''] + return out + + def _str_section(self, name): + out = [] + if self[name]: + out += self._str_header(name) + out += self[name] + out += [''] + return out + + def _str_see_also(self, func_role): + if not self['See Also']: return [] + out = [] + out += self._str_header("See Also") + last_had_desc = True + for func, desc, role in self['See Also']: + if role: + link = ':%s:`%s`' % (role, func) + elif func_role: + link = ':%s:`%s`' % (func_role, func) + else: + link = "`%s`_" % func + if desc or last_had_desc: + out += [''] + out += [link] + else: + out[-1] += ", %s" % link + if desc: + out += self._str_indent([' '.join(desc)]) + last_had_desc = True + else: + last_had_desc = False + out += [''] + return out + + def _str_index(self): + idx = self['index'] + out = [] + out += ['.. 
index:: %s' % idx.get('default','')] + for section, references in idx.iteritems(): + if section == 'default': + continue + out += [' :%s: %s' % (section, ', '.join(references))] + return out + + def __str__(self, func_role=''): + out = [] + out += self._str_signature() + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Returns', 'Other Parameters', + 'Raises', 'Warns'): + out += self._str_param_list(param_list) + out += self._str_section('Warnings') + out += self._str_see_also(func_role) + for s in ('Notes','References','Examples'): + out += self._str_section(s) + for param_list in ('Attributes', 'Methods'): + out += self._str_param_list(param_list) + out += self._str_index() + return '\n'.join(out) + + +def indent(str,indent=4): + indent_str = ' '*indent + if str is None: + return indent_str + lines = str.split('\n') + return '\n'.join(indent_str + l for l in lines) + +def dedent_lines(lines): + """Deindent a list of lines maximally""" + return textwrap.dedent("\n".join(lines)).split("\n") + +def header(text, style='-'): + return text + '\n' + style*len(text) + '\n' + + +class FunctionDoc(NumpyDocString): + def __init__(self, func, role='func', doc=None, config={}): + self._f = func + self._role = role # e.g. "func" or "meth" + + if doc is None: + if func is None: + raise ValueError("No function or docstring given") + doc = inspect.getdoc(func) or '' + NumpyDocString.__init__(self, doc) + + if not self['Signature'] and func is not None: + func, func_name = self.get_func() + try: + # try to read signature + argspec = inspect.getargspec(func) + argspec = inspect.formatargspec(*argspec) + argspec = argspec.replace('*','\*') + signature = '%s%s' % (func_name, argspec) + except TypeError, e: + signature = '%s()' % func_name + self['Signature'] = signature + + def get_func(self): + func_name = getattr(self._f, '__name__', self.__class__.__name__) + if inspect.isclass(self._f): + func = getattr(self._f, '__call__', self._f.__init__) + else: + func = self._f + return func, func_name + + def __str__(self): + out = '' + + func, func_name = self.get_func() + signature = self['Signature'].replace('*', '\*') + + roles = {'func': 'function', + 'meth': 'method'} + + if self._role: + if not roles.has_key(self._role): + print "Warning: invalid role %s" % self._role + out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), + func_name) + + out += super(FunctionDoc, self).__str__(func_role=self._role) + return out + + +class ClassDoc(NumpyDocString): + def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, + config={}): + if not inspect.isclass(cls) and cls is not None: + raise ValueError("Expected a class or None, but got %r" % cls) + self._cls = cls + + if modulename and not modulename.endswith('.'): + modulename += '.' 
+ self._mod = modulename + + if doc is None: + if cls is None: + raise ValueError("No class or documentation string given") + doc = pydoc.getdoc(cls) + + NumpyDocString.__init__(self, doc) + + if config.get('show_class_members', True): + if not self['Methods']: + self['Methods'] = [(name, '', '') + for name in sorted(self.methods)] + if not self['Attributes']: + self['Attributes'] = [(name, '', '') + for name in sorted(self.properties)] + + @property + def methods(self): + if self._cls is None: + return [] + return [name for name,func in inspect.getmembers(self._cls) + if not name.startswith('_') and callable(func)] + + @property + def properties(self): + if self._cls is None: + return [] + return [name for name,func in inspect.getmembers(self._cls) + if not name.startswith('_') and func is None] diff --git a/docs/sphinxext/docscrape_sphinx.py b/docs/sphinxext/docscrape_sphinx.py new file mode 100644 index 0000000000..e44e770ef8 --- /dev/null +++ b/docs/sphinxext/docscrape_sphinx.py @@ -0,0 +1,227 @@ +import re, inspect, textwrap, pydoc +import sphinx +from docscrape import NumpyDocString, FunctionDoc, ClassDoc + +class SphinxDocString(NumpyDocString): + def __init__(self, docstring, config={}): + self.use_plots = config.get('use_plots', False) + NumpyDocString.__init__(self, docstring, config=config) + + # string conversion routines + def _str_header(self, name, symbol='`'): + return ['.. rubric:: ' + name, ''] + + def _str_field_list(self, name): + return [':' + name + ':'] + + def _str_indent(self, doc, indent=4): + out = [] + for line in doc: + out += [' '*indent + line] + return out + + def _str_signature(self): + return [''] + if self['Signature']: + return ['``%s``' % self['Signature']] + [''] + else: + return [''] + + def _str_summary(self): + return self['Summary'] + [''] + + def _str_extended_summary(self): + return self['Extended Summary'] + [''] + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_field_list(name) + out += [''] + for param,param_type,desc in self[name]: + out += self._str_indent(['**%s** : %s' % (param.strip(), + param_type)]) + out += [''] + out += self._str_indent(desc,8) + out += [''] + return out + + @property + def _obj(self): + if hasattr(self, '_cls'): + return self._cls + elif hasattr(self, '_f'): + return self._f + return None + + def _str_member_list(self, name): + """ + Generate a member listing, autosummary:: table where possible, + and a table where not. + + """ + out = [] + if self[name]: + out += ['.. rubric:: %s' % name, ''] + prefix = getattr(self, '_name', '') + + if prefix: + prefix = '~%s.' % prefix + + autosum = [] + others = [] + for param, param_type, desc in self[name]: + param = param.strip() + if not self._obj or hasattr(self._obj, param): + autosum += [" %s%s" % (prefix, param)] + else: + others.append((param, param_type, desc)) + + if autosum: + out += ['.. 
autosummary::', ' :toctree:', ''] + out += autosum + + if others: + maxlen_0 = max([len(x[0]) for x in others]) + maxlen_1 = max([len(x[1]) for x in others]) + hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10 + fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) + n_indent = maxlen_0 + maxlen_1 + 4 + out += [hdr] + for param, param_type, desc in others: + out += [fmt % (param.strip(), param_type)] + out += self._str_indent(desc, n_indent) + out += [hdr] + out += [''] + return out + + def _str_section(self, name): + out = [] + if self[name]: + out += self._str_header(name) + out += [''] + content = textwrap.dedent("\n".join(self[name])).split("\n") + out += content + out += [''] + return out + + def _str_see_also(self, func_role): + out = [] + if self['See Also']: + see_also = super(SphinxDocString, self)._str_see_also(func_role) + out = ['.. seealso::', ''] + out += self._str_indent(see_also[2:]) + return out + + def _str_warnings(self): + out = [] + if self['Warnings']: + out = ['.. warning::', ''] + out += self._str_indent(self['Warnings']) + return out + + def _str_index(self): + idx = self['index'] + out = [] + if len(idx) == 0: + return out + + out += ['.. index:: %s' % idx.get('default','')] + for section, references in idx.iteritems(): + if section == 'default': + continue + elif section == 'refguide': + out += [' single: %s' % (', '.join(references))] + else: + out += [' %s: %s' % (section, ','.join(references))] + return out + + def _str_references(self): + out = [] + if self['References']: + out += self._str_header('References') + if isinstance(self['References'], str): + self['References'] = [self['References']] + out.extend(self['References']) + out += [''] + # Latex collects all references to a separate bibliography, + # so we need to insert links to it + if sphinx.__version__ >= "0.6": + out += ['.. only:: latex',''] + else: + out += ['.. latexonly::',''] + items = [] + for line in self['References']: + m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) + if m: + items.append(m.group(1)) + out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] + return out + + def _str_examples(self): + examples_str = "\n".join(self['Examples']) + + if (self.use_plots and 'import matplotlib' in examples_str + and 'plot::' not in examples_str): + out = [] + out += self._str_header('Examples') + out += ['.. 
plot::', ''] + out += self._str_indent(self['Examples']) + out += [''] + return out + else: + return self._str_section('Examples') + + def __str__(self, indent=0, func_role="obj"): + out = [] + out += self._str_signature() + out += self._str_index() + [''] + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Returns', 'Other Parameters', + 'Raises', 'Warns'): + out += self._str_param_list(param_list) + out += self._str_warnings() + out += self._str_see_also(func_role) + out += self._str_section('Notes') + out += self._str_references() + out += self._str_examples() + for param_list in ('Attributes', 'Methods'): + out += self._str_member_list(param_list) + out = self._str_indent(out,indent) + return '\n'.join(out) + +class SphinxFunctionDoc(SphinxDocString, FunctionDoc): + def __init__(self, obj, doc=None, config={}): + self.use_plots = config.get('use_plots', False) + FunctionDoc.__init__(self, obj, doc=doc, config=config) + +class SphinxClassDoc(SphinxDocString, ClassDoc): + def __init__(self, obj, doc=None, func_doc=None, config={}): + self.use_plots = config.get('use_plots', False) + ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) + +class SphinxObjDoc(SphinxDocString): + def __init__(self, obj, doc=None, config={}): + self._f = obj + SphinxDocString.__init__(self, doc, config=config) + +def get_doc_object(obj, what=None, doc=None, config={}): + if what is None: + if inspect.isclass(obj): + what = 'class' + elif inspect.ismodule(obj): + what = 'module' + elif callable(obj): + what = 'function' + else: + what = 'object' + if what == 'class': + return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, + config=config) + elif what in ('function', 'method'): + return SphinxFunctionDoc(obj, doc=doc, config=config) + else: + if doc is None: + doc = pydoc.getdoc(obj) + return SphinxObjDoc(obj, doc, config=config) diff --git a/docs/sphinxext/inheritance_diagram.py b/docs/sphinxext/inheritance_diagram.py new file mode 100644 index 0000000000..407fc13ffb --- /dev/null +++ b/docs/sphinxext/inheritance_diagram.py @@ -0,0 +1,407 @@ +""" +Defines a docutils directive for inserting inheritance diagrams. + +Provide the directive with one or more classes or modules (separated +by whitespace). For modules, all of the classes in that module will +be used. + +Example:: + + Given the following classes: + + class A: pass + class B(A): pass + class C(A): pass + class D(B, C): pass + class E(B): pass + + .. inheritance-diagram: D E + + Produces a graph like the following: + + A + / \ + B C + / \ / + E D + +The graph is inserted as a PNG+image map into HTML and a PDF in +LaTeX. +""" + +import inspect +import os +import re +import subprocess +try: + from hashlib import md5 +except ImportError: + from md5 import md5 + +from docutils.nodes import Body, Element +from docutils.parsers.rst import directives +from sphinx.roles import xfileref_role + +def my_import(name): + """Module importer - taken from the python documentation. + + This function allows importing names with dots in them.""" + + mod = __import__(name) + components = name.split('.') + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +class DotException(Exception): + pass + +class InheritanceGraph(object): + """ + Given a list of classes, determines the set of classes that + they inherit from all the way to the root "object", and then + is able to generate a graphviz dot graph from them. 
+ """ + def __init__(self, class_names, show_builtins=False): + """ + *class_names* is a list of child classes to show bases from. + + If *show_builtins* is True, then Python builtins will be shown + in the graph. + """ + self.class_names = class_names + self.classes = self._import_classes(class_names) + self.all_classes = self._all_classes(self.classes) + if len(self.all_classes) == 0: + raise ValueError("No classes found for inheritance diagram") + self.show_builtins = show_builtins + + py_sig_re = re.compile(r'''^([\w.]*\.)? # class names + (\w+) \s* $ # optionally arguments + ''', re.VERBOSE) + + def _import_class_or_module(self, name): + """ + Import a class using its fully-qualified *name*. + """ + try: + path, base = self.py_sig_re.match(name).groups() + except: + raise ValueError( + "Invalid class or module '%s' specified for inheritance diagram" % name) + fullname = (path or '') + base + path = (path and path.rstrip('.')) + if not path: + path = base + try: + module = __import__(path, None, None, []) + # We must do an import of the fully qualified name. Otherwise if a + # subpackage 'a.b' is requested where 'import a' does NOT provide + # 'a.b' automatically, then 'a.b' will not be found below. This + # second call will force the equivalent of 'import a.b' to happen + # after the top-level import above. + my_import(fullname) + + except ImportError: + raise ValueError( + "Could not import class or module '%s' specified for inheritance diagram" % name) + + try: + todoc = module + for comp in fullname.split('.')[1:]: + todoc = getattr(todoc, comp) + except AttributeError: + raise ValueError( + "Could not find class or module '%s' specified for inheritance diagram" % name) + + # If a class, just return it + if inspect.isclass(todoc): + return [todoc] + elif inspect.ismodule(todoc): + classes = [] + for cls in todoc.__dict__.values(): + if inspect.isclass(cls) and cls.__module__ == todoc.__name__: + classes.append(cls) + return classes + raise ValueError( + "'%s' does not resolve to a class or module" % name) + + def _import_classes(self, class_names): + """ + Import a list of classes. + """ + classes = [] + for name in class_names: + classes.extend(self._import_class_or_module(name)) + return classes + + def _all_classes(self, classes): + """ + Return a list of all classes that are ancestors of *classes*. + """ + all_classes = {} + + def recurse(cls): + all_classes[cls] = None + for c in cls.__bases__: + if c not in all_classes: + recurse(c) + + for cls in classes: + recurse(cls) + + return all_classes.keys() + + def class_name(self, cls, parts=0): + """ + Given a class object, return a fully-qualified name. This + works for things I've tested in matplotlib so far, but may not + be completely general. + """ + module = cls.__module__ + if module == '__builtin__': + fullname = cls.__name__ + else: + fullname = "%s.%s" % (module, cls.__name__) + if parts == 0: + return fullname + name_parts = fullname.split('.') + return '.'.join(name_parts[-parts:]) + + def get_all_class_names(self): + """ + Get all of the class names involved in the graph. 
+ """ + return [self.class_name(x) for x in self.all_classes] + + # These are the default options for graphviz + default_graph_options = { + "rankdir": "LR", + "size": '"8.0, 12.0"' + } + default_node_options = { + "shape": "box", + "fontsize": 10, + "height": 0.25, + "fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans", + "style": '"setlinewidth(0.5)"' + } + default_edge_options = { + "arrowsize": 0.5, + "style": '"setlinewidth(0.5)"' + } + + def _format_node_options(self, options): + return ','.join(["%s=%s" % x for x in options.items()]) + def _format_graph_options(self, options): + return ''.join(["%s=%s;\n" % x for x in options.items()]) + + def generate_dot(self, fd, name, parts=0, urls={}, + graph_options={}, node_options={}, + edge_options={}): + """ + Generate a graphviz dot graph from the classes that + were passed in to __init__. + + *fd* is a Python file-like object to write to. + + *name* is the name of the graph + + *urls* is a dictionary mapping class names to http urls + + *graph_options*, *node_options*, *edge_options* are + dictionaries containing key/value pairs to pass on as graphviz + properties. + """ + g_options = self.default_graph_options.copy() + g_options.update(graph_options) + n_options = self.default_node_options.copy() + n_options.update(node_options) + e_options = self.default_edge_options.copy() + e_options.update(edge_options) + + fd.write('digraph %s {\n' % name) + fd.write(self._format_graph_options(g_options)) + + for cls in self.all_classes: + if not self.show_builtins and cls in __builtins__.values(): + continue + + name = self.class_name(cls, parts) + + # Write the node + this_node_options = n_options.copy() + url = urls.get(self.class_name(cls)) + if url is not None: + this_node_options['URL'] = '"%s"' % url + fd.write(' "%s" [%s];\n' % + (name, self._format_node_options(this_node_options))) + + # Write the edges + for base in cls.__bases__: + if not self.show_builtins and base in __builtins__.values(): + continue + + base_name = self.class_name(base, parts) + fd.write(' "%s" -> "%s" [%s];\n' % + (base_name, name, + self._format_node_options(e_options))) + fd.write('}\n') + + def run_dot(self, args, name, parts=0, urls={}, + graph_options={}, node_options={}, edge_options={}): + """ + Run graphviz 'dot' over this graph, returning whatever 'dot' + writes to stdout. + + *args* will be passed along as commandline arguments. + + *name* is the name of the graph + + *urls* is a dictionary mapping class names to http urls + + Raises DotException for any of the many os and + installation-related errors that may occur. + """ + try: + dot = subprocess.Popen(['dot'] + list(args), + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + close_fds=True) + except OSError: + raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?") + except ValueError: + raise DotException("'dot' called with invalid arguments") + except: + raise DotException("Unexpected error calling 'dot'") + + self.generate_dot(dot.stdin, name, parts, urls, graph_options, + node_options, edge_options) + dot.stdin.close() + result = dot.stdout.read() + returncode = dot.wait() + if returncode != 0: + raise DotException("'dot' returned the errorcode %d" % returncode) + return result + +class inheritance_diagram(Body, Element): + """ + A docutils node to use as a placeholder for the inheritance + diagram. 
+ """ + pass + +def inheritance_diagram_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, + state_machine): + """ + Run when the inheritance_diagram directive is first encountered. + """ + node = inheritance_diagram() + + class_names = arguments + + # Create a graph starting with the list of classes + graph = InheritanceGraph(class_names) + + # Create xref nodes for each target of the graph's image map and + # add them to the doc tree so that Sphinx can resolve the + # references to real URLs later. These nodes will eventually be + # removed from the doctree after we're done with them. + for name in graph.get_all_class_names(): + refnodes, x = xfileref_role( + 'class', ':class:`%s`' % name, name, 0, state) + node.extend(refnodes) + # Store the graph object so we can use it to generate the + # dot file later + node['graph'] = graph + # Store the original content for use as a hash + node['parts'] = options.get('parts', 0) + node['content'] = " ".join(class_names) + return [node] + +def get_graph_hash(node): + return md5(node['content'] + str(node['parts'])).hexdigest()[-10:] + +def html_output_graph(self, node): + """ + Output the graph for HTML. This will insert a PNG with clickable + image map. + """ + graph = node['graph'] + parts = node['parts'] + + graph_hash = get_graph_hash(node) + name = "inheritance%s" % graph_hash + path = '_images' + dest_path = os.path.join(setup.app.builder.outdir, path) + if not os.path.exists(dest_path): + os.makedirs(dest_path) + png_path = os.path.join(dest_path, name + ".png") + path = setup.app.builder.imgpath + + # Create a mapping from fully-qualified class names to URLs. + urls = {} + for child in node: + if child.get('refuri') is not None: + urls[child['reftitle']] = child.get('refuri') + elif child.get('refid') is not None: + urls[child['reftitle']] = '#' + child.get('refid') + + # These arguments to dot will save a PNG file to disk and write + # an HTML image map to stdout. + image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'], + name, parts, urls) + return ('%s' % + (path, name, name, image_map)) + +def latex_output_graph(self, node): + """ + Output the graph for LaTeX. This will insert a PDF. + """ + graph = node['graph'] + parts = node['parts'] + + graph_hash = get_graph_hash(node) + name = "inheritance%s" % graph_hash + dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images')) + if not os.path.exists(dest_path): + os.makedirs(dest_path) + pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf")) + + graph.run_dot(['-Tpdf', '-o%s' % pdf_path], + name, parts, graph_options={'size': '"6.0,6.0"'}) + return '\n\\includegraphics{%s}\n\n' % pdf_path + +def visit_inheritance_diagram(inner_func): + """ + This is just a wrapper around html/latex_output_graph to make it + easier to handle errors and insert warnings. 
+ """ + def visitor(self, node): + try: + content = inner_func(self, node) + except DotException, e: + # Insert the exception as a warning in the document + warning = self.document.reporter.warning(str(e), line=node.line) + warning.parent = node + node.children = [warning] + else: + source = self.document.attributes['source'] + self.body.append(content) + node.children = [] + return visitor + +def do_nothing(self, node): + pass + +def setup(app): + setup.app = app + setup.confdir = app.confdir + + app.add_node( + inheritance_diagram, + latex=(visit_inheritance_diagram(latex_output_graph), do_nothing), + html=(visit_inheritance_diagram(html_output_graph), do_nothing)) + app.add_directive( + 'inheritance-diagram', inheritance_diagram_directive, + False, (1, 100, 0), parts = directives.nonnegative_int) diff --git a/docs/sphinxext/ipython_console_highlighting.py b/docs/sphinxext/ipython_console_highlighting.py new file mode 100644 index 0000000000..217b779dda --- /dev/null +++ b/docs/sphinxext/ipython_console_highlighting.py @@ -0,0 +1,114 @@ +"""reST directive for syntax-highlighting ipython interactive sessions. + +XXX - See what improvements can be made based on the new (as of Sept 2009) +'pycon' lexer for the python console. At the very least it will give better +highlighted tracebacks. +""" + +#----------------------------------------------------------------------------- +# Needed modules + +# Standard library +import re + +# Third party +from pygments.lexer import Lexer, do_insertions +from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer, + PythonTracebackLexer) +from pygments.token import Comment, Generic + +from sphinx import highlighting + +#----------------------------------------------------------------------------- +# Global constants +line_re = re.compile('.*?\n') + +#----------------------------------------------------------------------------- +# Code begins - classes and functions + +class IPythonConsoleLexer(Lexer): + """ + For IPython console output or doctests, such as: + + .. sourcecode:: ipython + + In [1]: a = 'foo' + + In [2]: a + Out[2]: 'foo' + + In [3]: print a + foo + + In [4]: 1 / 0 + + Notes: + + - Tracebacks are not currently supported. + + - It assumes the default IPython prompts, not customized ones. + """ + + name = 'IPython console session' + aliases = ['ipython'] + mimetypes = ['text/x-ipython-console'] + input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)") + output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)") + continue_prompt = re.compile(" \.\.\.+:") + tb_start = re.compile("\-+") + + def get_tokens_unprocessed(self, text): + pylexer = PythonLexer(**self.options) + tblexer = PythonTracebackLexer(**self.options) + + curcode = '' + insertions = [] + for match in line_re.finditer(text): + line = match.group() + input_prompt = self.input_prompt.match(line) + continue_prompt = self.continue_prompt.match(line.rstrip()) + output_prompt = self.output_prompt.match(line) + if line.startswith("#"): + insertions.append((len(curcode), + [(0, Comment, line)])) + elif input_prompt is not None: + insertions.append((len(curcode), + [(0, Generic.Prompt, input_prompt.group())])) + curcode += line[input_prompt.end():] + elif continue_prompt is not None: + insertions.append((len(curcode), + [(0, Generic.Prompt, continue_prompt.group())])) + curcode += line[continue_prompt.end():] + elif output_prompt is not None: + # Use the 'error' token for output. 
We should probably make + # our own token, but error is typicaly in a bright color like + # red, so it works fine for our output prompts. + insertions.append((len(curcode), + [(0, Generic.Error, output_prompt.group())])) + curcode += line[output_prompt.end():] + else: + if curcode: + for item in do_insertions(insertions, + pylexer.get_tokens_unprocessed(curcode)): + yield item + curcode = '' + insertions = [] + yield match.start(), Generic.Output, line + if curcode: + for item in do_insertions(insertions, + pylexer.get_tokens_unprocessed(curcode)): + yield item + + +def setup(app): + """Setup as a sphinx extension.""" + + # This is only a lexer, so adding it below to pygments appears sufficient. + # But if somebody knows that the right API usage should be to do that via + # sphinx, by all means fix it here. At least having this setup.py + # suppresses the sphinx warning we'd get without it. + pass + +#----------------------------------------------------------------------------- +# Register the extension as a valid pygments lexer +highlighting.lexers['ipython'] = IPythonConsoleLexer() diff --git a/docs/sphinxext/ipython_directive.py b/docs/sphinxext/ipython_directive.py new file mode 100644 index 0000000000..cc914ee18d --- /dev/null +++ b/docs/sphinxext/ipython_directive.py @@ -0,0 +1,635 @@ +# -*- coding: utf-8 -*- +"""Sphinx directive to support embedded IPython code. + +This directive allows pasting of entire interactive IPython sessions, prompts +and all, and their code will actually get re-executed at doc build time, with +all prompts renumbered sequentially. + +To enable this directive, simply list it in your Sphinx ``conf.py`` file +(making sure the directory where you placed it is visible to sphinx, as is +needed for all Sphinx directives). + +By default this directive assumes that your prompts are unchanged IPython ones, +but this can be customized. For example, the following code in your Sphinx +config file will configure this directive for the following input/output +prompts ``Yade [1]:`` and ``-> [1]:``:: + + import ipython_directive as id + id.rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*') + id.rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*') + id.fmtin ='Yade [%d]:' + id.fmtout=' -> [%d]:' + + from IPython import Config + id.CONFIG = Config( + prompt_in1="Yade [\#]:", + prompt_in2=" .\D..", + prompt_out=" -> [\#]:" + ) + id.reconfig_shell() + + import ipython_console_highlighting as ich + ich.IPythonConsoleLexer.input_prompt= + re.compile("(Yade \[[0-9]+\]: )|( \.\.\.+:)") + ich.IPythonConsoleLexer.output_prompt= + re.compile("(( -> )|(Out)\[[0-9]+\]: )|( \.\.\.+:)") + ich.IPythonConsoleLexer.continue_prompt=re.compile(" \.\.\.+:") + + +ToDo +---- + +- Turn the ad-hoc test() function into a real test suite. +- Break up ipython-specific functionality from matplotlib stuff into better + separated code. +- Make sure %bookmarks used internally are removed on exit. + + +Authors +------- + +- John D Hunter: orignal author. +- Fernando Perez: refactoring, documentation, cleanups, port to 0.11. +- VáclavŠmilauer : Prompt generalizations. 
+""" + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- + +# Stdlib +import cStringIO +import os +import re +import sys + +# To keep compatibility with various python versions +try: + from hashlib import md5 +except ImportError: + from md5 import md5 + +# Third-party +import matplotlib +import sphinx +from docutils.parsers.rst import directives + +matplotlib.use('Agg') + +# Our own +from IPython import Config, InteractiveShell +from IPython.utils.io import Term + +#----------------------------------------------------------------------------- +# Globals +#----------------------------------------------------------------------------- + +sphinx_version = sphinx.__version__.split(".") +# The split is necessary for sphinx beta versions where the string is +# '6b1' +sphinx_version = tuple([int(re.split('[a-z]', x)[0]) + for x in sphinx_version[:2]]) + +COMMENT, INPUT, OUTPUT = range(3) +CONFIG = Config() +rgxin = re.compile('In \[(\d+)\]:\s?(.*)\s*') +rgxout = re.compile('Out\[(\d+)\]:\s?(.*)\s*') +fmtin = 'In [%d]:' +fmtout = 'Out[%d]:' + +#----------------------------------------------------------------------------- +# Functions and class declarations +#----------------------------------------------------------------------------- +def block_parser(part): + """ + part is a string of ipython text, comprised of at most one + input, one ouput, comments, and blank lines. The block parser + parses the text into a list of:: + + blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...] + + where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and + data is, depending on the type of token:: + + COMMENT : the comment string + + INPUT: the (DECORATOR, INPUT_LINE, REST) where + DECORATOR: the input decorator (or None) + INPUT_LINE: the input as string (possibly multi-line) + REST : any stdout generated by the input line (not OUTPUT) + + + OUTPUT: the output string, possibly multi-line + """ + + block = [] + lines = part.split('\n') + N = len(lines) + i = 0 + decorator = None + while 1: + + if i==N: + # nothing left to parse -- the last line + break + + line = lines[i] + i += 1 + line_stripped = line.strip() + if line_stripped.startswith('#'): + block.append((COMMENT, line)) + continue + + if line_stripped.startswith('@'): + # we're assuming at most one decorator -- may need to + # rethink + decorator = line_stripped + continue + + # does this look like an input line? + matchin = rgxin.match(line) + if matchin: + lineno, inputline = int(matchin.group(1)), matchin.group(2) + + # the ....: continuation string + continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2)) + Nc = len(continuation) + # input lines can continue on for more than one line, if + # we have a '\' line continuation char or a function call + # echo line 'print'. 
The input line can only be + # terminated by the end of the block or an output line, so + # we parse out the rest of the input line if it is + # multiline as well as any echo text + + rest = [] + while i2: + if debug: + print '\n'.join(lines) + else: + #print 'INSERTING %d lines'%len(lines) + state_machine.insert_input( + lines, state_machine.input_lines.source(0)) + + return [] + +ipython_directive.DEBUG = False +ipython_directive.DEBUG = True # dbg + +# Enable as a proper Sphinx directive +def setup(app): + setup.app = app + options = {'suppress': directives.flag, + 'doctest': directives.flag, + 'verbatim': directives.flag, + } + + app.add_directive('ipython', ipython_directive, True, (0, 2, 0), **options) + + +# Simple smoke test, needs to be converted to a proper automatic test. +def test(): + + examples = [ + r""" +In [9]: pwd +Out[9]: '/home/jdhunter/py4science/book' + +In [10]: cd bookdata/ +/home/jdhunter/py4science/book/bookdata + +In [2]: from pylab import * + +In [2]: ion() + +In [3]: im = imread('stinkbug.png') + +@savefig mystinkbug.png width=4in +In [4]: imshow(im) +Out[4]: + +""", + r""" + +In [1]: x = 'hello world' + +# string methods can be +# used to alter the string +@doctest +In [2]: x.upper() +Out[2]: 'HELLO WORLD' + +@verbatim +In [3]: x.st +x.startswith x.strip +""", + r""" + +In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\ + .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv' + +In [131]: print url.split('&') +['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv'] + +In [60]: import urllib + +""", + r"""\ + +In [133]: import numpy.random + +@suppress +In [134]: numpy.random.seed(2358) + +@doctest +In [135]: np.random.rand(10,2) +Out[135]: +array([[ 0.64524308, 0.59943846], + [ 0.47102322, 0.8715456 ], + [ 0.29370834, 0.74776844], + [ 0.99539577, 0.1313423 ], + [ 0.16250302, 0.21103583], + [ 0.81626524, 0.1312433 ], + [ 0.67338089, 0.72302393], + [ 0.7566368 , 0.07033696], + [ 0.22591016, 0.77731835], + [ 0.0072729 , 0.34273127]]) + +""", + + r""" +In [106]: print x +jdh + +In [109]: for i in range(10): + .....: print i + .....: + .....: +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +""", + + r""" + +In [144]: from pylab import * + +In [145]: ion() + +# use a semicolon to suppress the output +@savefig test_hist.png width=4in +In [151]: hist(np.random.randn(10000), 100); + + +@savefig test_plot.png width=4in +In [151]: plot(np.random.randn(10000), 'o'); + """, + + r""" +# use a semicolon to suppress the output +In [151]: plt.clf() + +@savefig plot_simple.png width=4in +In [151]: plot([1,2,3]) + +@savefig hist_simple.png width=4in +In [151]: hist(np.random.randn(10000), 100); + +""", + r""" +# update the current fig +In [151]: ylabel('number') + +In [152]: title('normal distribution') + + +@savefig hist_with_text.png +In [153]: grid(True) + + """, + ] + + #ipython_directive.DEBUG = True # dbg + #options = dict(suppress=True) # dbg + options = dict() + for example in examples: + content = example.split('\n') + ipython_directive('debug', arguments=None, options=options, + content=content, lineno=0, + content_offset=None, block_text=None, + state=None, state_machine=None, + ) + +# Run test suite as a script +if __name__=='__main__': + if not os.path.isdir('_static'): + os.mkdir('_static') + test() + print 'All OK? 
Check figures in _static/' diff --git a/docs/sphinxext/numpydoc.py b/docs/sphinxext/numpydoc.py new file mode 100644 index 0000000000..43c67336b5 --- /dev/null +++ b/docs/sphinxext/numpydoc.py @@ -0,0 +1,169 @@ +""" +======== +numpydoc +======== + +Sphinx extension that handles docstrings in the Numpy standard format. [1] + +It will: + +- Convert Parameters etc. sections to field lists. +- Convert See Also section to a See also entry. +- Renumber references. +- Extract the signature from the docstring, if it can't be determined otherwise. + +.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard + +""" + +import sphinx + +if sphinx.__version__ < '1.0.1': + raise RuntimeError("Sphinx 1.0.1 or newer is required") + +import os, re, pydoc +from docscrape_sphinx import get_doc_object, SphinxDocString +from sphinx.util.compat import Directive +import inspect + +def mangle_docstrings(app, what, name, obj, options, lines, + reference_offset=[0]): + + cfg = dict(use_plots=app.config.numpydoc_use_plots, + show_class_members=app.config.numpydoc_show_class_members) + + if what == 'module': + # Strip top title + title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', + re.I|re.S) + lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n") + else: + doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg) + lines[:] = unicode(doc).split(u"\n") + + if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ + obj.__name__: + if hasattr(obj, '__module__'): + v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__)) + else: + v = dict(full_name=obj.__name__) + lines += [u'', u'.. htmlonly::', ''] + lines += [u' %s' % x for x in + (app.config.numpydoc_edit_link % v).split("\n")] + + # replace reference numbers so that there are no duplicates + references = [] + for line in lines: + line = line.strip() + m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I) + if m: + references.append(m.group(1)) + + # start renaming from the longest string, to avoid overwriting parts + references.sort(key=lambda x: -len(x)) + if references: + for i, line in enumerate(lines): + for r in references: + if re.match(ur'^\d+$', r): + new_r = u"R%d" % (reference_offset[0] + int(r)) + else: + new_r = u"%s%d" % (r, reference_offset[0]) + lines[i] = lines[i].replace(u'[%s]_' % r, + u'[%s]_' % new_r) + lines[i] = lines[i].replace(u'.. [%s]' % r, + u'.. 
[%s]' % new_r) + + reference_offset[0] += len(references) + +def mangle_signature(app, what, name, obj, options, sig, retann): + # Do not try to inspect classes that don't define `__init__` + if (inspect.isclass(obj) and + (not hasattr(obj, '__init__') or + 'initializes x; see ' in pydoc.getdoc(obj.__init__))): + return '', '' + + if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return + if not hasattr(obj, '__doc__'): return + + doc = SphinxDocString(pydoc.getdoc(obj)) + if doc['Signature']: + sig = re.sub(u"^[^(]*", u"", doc['Signature']) + return sig, u'' + +def setup(app, get_doc_object_=get_doc_object): + global get_doc_object + get_doc_object = get_doc_object_ + + app.connect('autodoc-process-docstring', mangle_docstrings) + app.connect('autodoc-process-signature', mangle_signature) + app.add_config_value('numpydoc_edit_link', None, False) + app.add_config_value('numpydoc_use_plots', None, False) + app.add_config_value('numpydoc_show_class_members', True, True) + + # Extra mangling domains + app.add_domain(NumpyPythonDomain) + app.add_domain(NumpyCDomain) + +#------------------------------------------------------------------------------ +# Docstring-mangling domains +#------------------------------------------------------------------------------ + +from docutils.statemachine import ViewList +from sphinx.domains.c import CDomain +from sphinx.domains.python import PythonDomain + +class ManglingDomainBase(object): + directive_mangling_map = {} + + def __init__(self, *a, **kw): + super(ManglingDomainBase, self).__init__(*a, **kw) + self.wrap_mangling_directives() + + def wrap_mangling_directives(self): + for name, objtype in self.directive_mangling_map.items(): + self.directives[name] = wrap_mangling_directive( + self.directives[name], objtype) + +class NumpyPythonDomain(ManglingDomainBase, PythonDomain): + name = 'np' + directive_mangling_map = { + 'function': 'function', + 'class': 'class', + 'exception': 'class', + 'method': 'function', + 'classmethod': 'function', + 'staticmethod': 'function', + 'attribute': 'attribute', + } + +class NumpyCDomain(ManglingDomainBase, CDomain): + name = 'np-c' + directive_mangling_map = { + 'function': 'function', + 'member': 'attribute', + 'macro': 'function', + 'type': 'class', + 'var': 'object', + } + +def wrap_mangling_directive(base_directive, objtype): + class directive(base_directive): + def run(self): + env = self.state.document.settings.env + + name = None + if self.arguments: + m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) + name = m.group(2).strip() + + if not name: + name = self.arguments[0] + + lines = list(self.content) + mangle_docstrings(env.app, objtype, name, None, None, lines) + self.content = ViewList(lines, self.content.parent) + + return base_directive.run(self) + + return directive + diff --git a/docs/sphinxext/phantom_import.py b/docs/sphinxext/phantom_import.py new file mode 100644 index 0000000000..c77eeb544e --- /dev/null +++ b/docs/sphinxext/phantom_import.py @@ -0,0 +1,162 @@ +""" +============== +phantom_import +============== + +Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar +extensions to use docstrings loaded from an XML file. + +This extension loads an XML file in the Pydocweb format [1] and +creates a dummy module that contains the specified docstrings. This +can be used to get the current docstrings from a Pydocweb instance +without needing to rebuild the documented module. + +.. 
[1] http://code.google.com/p/pydocweb + +""" +import imp, sys, compiler, types, os, inspect, re + +def setup(app): + app.connect('builder-inited', initialize) + app.add_config_value('phantom_import_file', None, True) + +def initialize(app): + fn = app.config.phantom_import_file + if (fn and os.path.isfile(fn)): + print "[numpydoc] Phantom importing modules from", fn, "..." + import_phantom_module(fn) + +#------------------------------------------------------------------------------ +# Creating 'phantom' modules from an XML description +#------------------------------------------------------------------------------ +def import_phantom_module(xml_file): + """ + Insert a fake Python module to sys.modules, based on a XML file. + + The XML file is expected to conform to Pydocweb DTD. The fake + module will contain dummy objects, which guarantee the following: + + - Docstrings are correct. + - Class inheritance relationships are correct (if present in XML). + - Function argspec is *NOT* correct (even if present in XML). + Instead, the function signature is prepended to the function docstring. + - Class attributes are *NOT* correct; instead, they are dummy objects. + + Parameters + ---------- + xml_file : str + Name of an XML file to read + + """ + import lxml.etree as etree + + object_cache = {} + + tree = etree.parse(xml_file) + root = tree.getroot() + + # Sort items so that + # - Base classes come before classes inherited from them + # - Modules come before their contents + all_nodes = dict([(n.attrib['id'], n) for n in root]) + + def _get_bases(node, recurse=False): + bases = [x.attrib['ref'] for x in node.findall('base')] + if recurse: + j = 0 + while True: + try: + b = bases[j] + except IndexError: break + if b in all_nodes: + bases.extend(_get_bases(all_nodes[b])) + j += 1 + return bases + + type_index = ['module', 'class', 'callable', 'object'] + + def base_cmp(a, b): + x = cmp(type_index.index(a.tag), type_index.index(b.tag)) + if x != 0: return x + + if a.tag == 'class' and b.tag == 'class': + a_bases = _get_bases(a, recurse=True) + b_bases = _get_bases(b, recurse=True) + x = cmp(len(a_bases), len(b_bases)) + if x != 0: return x + if a.attrib['id'] in b_bases: return -1 + if b.attrib['id'] in a_bases: return 1 + + return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) + + nodes = root.getchildren() + nodes.sort(base_cmp) + + # Create phantom items + for node in nodes: + name = node.attrib['id'] + doc = (node.text or '').decode('string-escape') + "\n" + if doc == "\n": doc = "" + + # create parent, if missing + parent = name + while True: + parent = '.'.join(parent.split('.')[:-1]) + if not parent: break + if parent in object_cache: break + obj = imp.new_module(parent) + object_cache[parent] = obj + sys.modules[parent] = obj + + # create object + if node.tag == 'module': + obj = imp.new_module(name) + obj.__doc__ = doc + sys.modules[name] = obj + elif node.tag == 'class': + bases = [object_cache[b] for b in _get_bases(node) + if b in object_cache] + bases.append(object) + init = lambda self: None + init.__doc__ = doc + obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) + obj.__name__ = name.split('.')[-1] + elif node.tag == 'callable': + funcname = node.attrib['id'].split('.')[-1] + argspec = node.attrib.get('argspec') + if argspec: + argspec = re.sub('^[^(]*', '', argspec) + doc = "%s%s\n\n%s" % (funcname, argspec, doc) + obj = lambda: 0 + obj.__argspec_is_invalid_ = True + obj.func_name = funcname + obj.__name__ = name + obj.__doc__ = doc + if 
inspect.isclass(object_cache[parent]): + obj.__objclass__ = object_cache[parent] + else: + class Dummy(object): pass + obj = Dummy() + obj.__name__ = name + obj.__doc__ = doc + if inspect.isclass(object_cache[parent]): + obj.__get__ = lambda: None + object_cache[name] = obj + + if parent: + if inspect.ismodule(object_cache[parent]): + obj.__module__ = parent + setattr(object_cache[parent], name.split('.')[-1], obj) + + # Populate items + for node in root: + obj = object_cache.get(node.attrib['id']) + if obj is None: continue + for ref in node.findall('ref'): + if node.tag == 'class': + if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): + setattr(obj, ref.attrib['name'], + object_cache.get(ref.attrib['ref'])) + else: + setattr(obj, ref.attrib['name'], + object_cache.get(ref.attrib['ref'])) diff --git a/docs/sphinxext/plot_directive.py b/docs/sphinxext/plot_directive.py new file mode 100644 index 0000000000..80801e7986 --- /dev/null +++ b/docs/sphinxext/plot_directive.py @@ -0,0 +1,636 @@ +""" +A special directive for generating a matplotlib plot. + +.. warning:: + + This is a hacked version of plot_directive.py from Matplotlib. + It's very much subject to change! + + +Usage +----- + +Can be used like this:: + + .. plot:: examples/example.py + + .. plot:: + + import matplotlib.pyplot as plt + plt.plot([1,2,3], [4,5,6]) + + .. plot:: + + A plotting example: + + >>> import matplotlib.pyplot as plt + >>> plt.plot([1,2,3], [4,5,6]) + +The content is interpreted as doctest formatted if it has a line starting +with ``>>>``. + +The ``plot`` directive supports the options + + format : {'python', 'doctest'} + Specify the format of the input + + include-source : bool + Whether to display the source code. Default can be changed in conf.py + +and the ``image`` directive options ``alt``, ``height``, ``width``, +``scale``, ``align``, ``class``. + +Configuration options +--------------------- + +The plot directive has the following configuration options: + + plot_include_source + Default value for the include-source option + + plot_pre_code + Code that should be executed before each plot. + + plot_basedir + Base directory, to which plot:: file names are relative to. + (If None or empty, file names are relative to the directoly where + the file containing the directive is.) + + plot_formats + File formats to generate. List of tuples or strings:: + + [(suffix, dpi), suffix, ...] + + that determine the file format and the DPI. For entries whose + DPI was omitted, sensible defaults are chosen. + + plot_html_show_formats + Whether to show links to the files in HTML. + +TODO +---- + +* Refactor Latex output; now it's plain images, but it would be nice + to make them appear side-by-side, or in floats. 
+ +""" + +import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback +import sphinx + +import warnings +warnings.warn("A plot_directive module is also available under " + "matplotlib.sphinxext; expect this numpydoc.plot_directive " + "module to be deprecated after relevant features have been " + "integrated there.", + FutureWarning, stacklevel=2) + + +#------------------------------------------------------------------------------ +# Registration hook +#------------------------------------------------------------------------------ + +def setup(app): + setup.app = app + setup.config = app.config + setup.confdir = app.confdir + + app.add_config_value('plot_pre_code', '', True) + app.add_config_value('plot_include_source', False, True) + app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True) + app.add_config_value('plot_basedir', None, True) + app.add_config_value('plot_html_show_formats', True, True) + + app.add_directive('plot', plot_directive, True, (0, 1, False), + **plot_directive_options) + +#------------------------------------------------------------------------------ +# plot:: directive +#------------------------------------------------------------------------------ +from docutils.parsers.rst import directives +from docutils import nodes + +def plot_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + return run(arguments, content, options, state_machine, state, lineno) +plot_directive.__doc__ = __doc__ + +def _option_boolean(arg): + if not arg or not arg.strip(): + # no argument given, assume used as a flag + return True + elif arg.strip().lower() in ('no', '0', 'false'): + return False + elif arg.strip().lower() in ('yes', '1', 'true'): + return True + else: + raise ValueError('"%s" unknown boolean' % arg) + +def _option_format(arg): + return directives.choice(arg, ('python', 'lisp')) + +def _option_align(arg): + return directives.choice(arg, ("top", "middle", "bottom", "left", "center", + "right")) + +plot_directive_options = {'alt': directives.unchanged, + 'height': directives.length_or_unitless, + 'width': directives.length_or_percentage_or_unitless, + 'scale': directives.nonnegative_int, + 'align': _option_align, + 'class': directives.class_option, + 'include-source': _option_boolean, + 'format': _option_format, + } + +#------------------------------------------------------------------------------ +# Generating output +#------------------------------------------------------------------------------ + +from docutils import nodes, utils + +try: + # Sphinx depends on either Jinja or Jinja2 + import jinja2 + def format_template(template, **kw): + return jinja2.Template(template).render(**kw) +except ImportError: + import jinja + def format_template(template, **kw): + return jinja.from_string(template, **kw) + +TEMPLATE = """ +{{ source_code }} + +{{ only_html }} + + {% if source_link or (html_show_formats and not multi_image) %} + ( + {%- if source_link -%} + `Source code <{{ source_link }}>`__ + {%- endif -%} + {%- if html_show_formats and not multi_image -%} + {%- for img in images -%} + {%- for fmt in img.formats -%} + {%- if source_link or not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + {%- endfor -%} + {%- endif -%} + ) + {% endif %} + + {% for img in images %} + .. 
figure:: {{ build_dir }}/{{ img.basename }}.png + {%- for option in options %} + {{ option }} + {% endfor %} + + {% if html_show_formats and multi_image -%} + ( + {%- for fmt in img.formats -%} + {%- if not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + ) + {%- endif -%} + {% endfor %} + +{{ only_latex }} + + {% for img in images %} + .. image:: {{ build_dir }}/{{ img.basename }}.pdf + {% endfor %} + +""" + +class ImageFile(object): + def __init__(self, basename, dirname): + self.basename = basename + self.dirname = dirname + self.formats = [] + + def filename(self, format): + return os.path.join(self.dirname, "%s.%s" % (self.basename, format)) + + def filenames(self): + return [self.filename(fmt) for fmt in self.formats] + +def run(arguments, content, options, state_machine, state, lineno): + if arguments and content: + raise RuntimeError("plot:: directive can't have both args and content") + + document = state_machine.document + config = document.settings.env.config + + options.setdefault('include-source', config.plot_include_source) + + # determine input + rst_file = document.attributes['source'] + rst_dir = os.path.dirname(rst_file) + + if arguments: + if not config.plot_basedir: + source_file_name = os.path.join(rst_dir, + directives.uri(arguments[0])) + else: + source_file_name = os.path.join(setup.confdir, config.plot_basedir, + directives.uri(arguments[0])) + code = open(source_file_name, 'r').read() + output_base = os.path.basename(source_file_name) + else: + source_file_name = rst_file + code = textwrap.dedent("\n".join(map(str, content))) + counter = document.attributes.get('_plot_counter', 0) + 1 + document.attributes['_plot_counter'] = counter + base, ext = os.path.splitext(os.path.basename(source_file_name)) + output_base = '%s-%d.py' % (base, counter) + + base, source_ext = os.path.splitext(output_base) + if source_ext in ('.py', '.rst', '.txt'): + output_base = base + else: + source_ext = '' + + # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames + output_base = output_base.replace('.', '-') + + # is it in doctest format? 
+ is_doctest = contains_doctest(code) + if options.has_key('format'): + if options['format'] == 'python': + is_doctest = False + else: + is_doctest = True + + # determine output directory name fragment + source_rel_name = relpath(source_file_name, setup.confdir) + source_rel_dir = os.path.dirname(source_rel_name) + while source_rel_dir.startswith(os.path.sep): + source_rel_dir = source_rel_dir[1:] + + # build_dir: where to place output files (temporarily) + build_dir = os.path.join(os.path.dirname(setup.app.doctreedir), + 'plot_directive', + source_rel_dir) + if not os.path.exists(build_dir): + os.makedirs(build_dir) + + # output_dir: final location in the builder's directory + dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, + source_rel_dir)) + + # how to link to files from the RST file + dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), + source_rel_dir).replace(os.path.sep, '/') + build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') + source_link = dest_dir_link + '/' + output_base + source_ext + + # make figures + try: + results = makefig(code, source_file_name, build_dir, output_base, + config) + errors = [] + except PlotError, err: + reporter = state.memo.reporter + sm = reporter.system_message( + 2, "Exception occurred in plotting %s: %s" % (output_base, err), + line=lineno) + results = [(code, [])] + errors = [sm] + + # generate output restructuredtext + total_lines = [] + for j, (code_piece, images) in enumerate(results): + if options['include-source']: + if is_doctest: + lines = [''] + lines += [row.rstrip() for row in code_piece.split('\n')] + else: + lines = ['.. code-block:: python', ''] + lines += [' %s' % row.rstrip() + for row in code_piece.split('\n')] + source_code = "\n".join(lines) + else: + source_code = "" + + opts = [':%s: %s' % (key, val) for key, val in options.items() + if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] + + only_html = ".. only:: html" + only_latex = ".. 
only:: latex" + + if j == 0: + src_link = source_link + else: + src_link = None + + result = format_template( + TEMPLATE, + dest_dir=dest_dir_link, + build_dir=build_dir_link, + source_link=src_link, + multi_image=len(images) > 1, + only_html=only_html, + only_latex=only_latex, + options=opts, + images=images, + source_code=source_code, + html_show_formats=config.plot_html_show_formats) + + total_lines.extend(result.split("\n")) + total_lines.extend("\n") + + if total_lines: + state_machine.insert_input(total_lines, source=source_file_name) + + # copy image files to builder's output directory + if not os.path.exists(dest_dir): + os.makedirs(dest_dir) + + for code_piece, images in results: + for img in images: + for fn in img.filenames(): + shutil.copyfile(fn, os.path.join(dest_dir, + os.path.basename(fn))) + + # copy script (if necessary) + if source_file_name == rst_file: + target_name = os.path.join(dest_dir, output_base + source_ext) + f = open(target_name, 'w') + f.write(unescape_doctest(code)) + f.close() + + return errors + + +#------------------------------------------------------------------------------ +# Run code and capture figures +#------------------------------------------------------------------------------ + +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import matplotlib.image as image +from matplotlib import _pylab_helpers + +import exceptions + +def contains_doctest(text): + try: + # check if it's valid Python as-is + compile(text, '', 'exec') + return False + except SyntaxError: + pass + r = re.compile(r'^\s*>>>', re.M) + m = r.search(text) + return bool(m) + +def unescape_doctest(text): + """ + Extract code from a piece of text, which contains either Python code + or doctests. + + """ + if not contains_doctest(text): + return text + + code = "" + for line in text.split("\n"): + m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line) + if m: + code += m.group(2) + "\n" + elif line.strip(): + code += "# " + line.strip() + "\n" + else: + code += "\n" + return code + +def split_code_at_show(text): + """ + Split code at plt.show() + + """ + + parts = [] + is_doctest = contains_doctest(text) + + part = [] + for line in text.split("\n"): + if (not is_doctest and line.strip() == 'plt.show()') or \ + (is_doctest and line.strip() == '>>> plt.show()'): + part.append(line) + parts.append("\n".join(part)) + part = [] + else: + part.append(line) + if "\n".join(part).strip(): + parts.append("\n".join(part)) + return parts + +class PlotError(RuntimeError): + pass + +def run_code(code, code_path, ns=None): + # Change the working directory to the directory of the example, so + # it can get at its data files, if any. 
+ pwd = os.getcwd() + old_sys_path = list(sys.path) + if code_path is not None: + dirname = os.path.abspath(os.path.dirname(code_path)) + os.chdir(dirname) + sys.path.insert(0, dirname) + + # Redirect stdout + stdout = sys.stdout + sys.stdout = cStringIO.StringIO() + + # Reset sys.argv + old_sys_argv = sys.argv + sys.argv = [code_path] + + try: + try: + code = unescape_doctest(code) + if ns is None: + ns = {} + if not ns: + exec setup.config.plot_pre_code in ns + exec code in ns + except (Exception, SystemExit), err: + raise PlotError(traceback.format_exc()) + finally: + os.chdir(pwd) + sys.argv = old_sys_argv + sys.path[:] = old_sys_path + sys.stdout = stdout + return ns + + +#------------------------------------------------------------------------------ +# Generating figures +#------------------------------------------------------------------------------ + +def out_of_date(original, derived): + """ + Returns True if derivative is out-of-date wrt original, + both of which are full file paths. + """ + return (not os.path.exists(derived) + or os.stat(derived).st_mtime < os.stat(original).st_mtime) + + +def makefig(code, code_path, output_dir, output_base, config): + """ + Run a pyplot script *code* and save the images under *output_dir* + with file names derived from *output_base* + + """ + + # -- Parse format list + default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50} + formats = [] + for fmt in config.plot_formats: + if isinstance(fmt, str): + formats.append((fmt, default_dpi.get(fmt, 80))) + elif type(fmt) in (tuple, list) and len(fmt)==2: + formats.append((str(fmt[0]), int(fmt[1]))) + else: + raise PlotError('invalid image format "%r" in plot_formats' % fmt) + + # -- Try to determine if all images already exist + + code_pieces = split_code_at_show(code) + + # Look for single-figure output files first + all_exists = True + img = ImageFile(output_base, output_dir) + for format, dpi in formats: + if out_of_date(code_path, img.filename(format)): + all_exists = False + break + img.formats.append(format) + + if all_exists: + return [(code, [img])] + + # Then look for multi-figure output files + results = [] + all_exists = True + for i, code_piece in enumerate(code_pieces): + images = [] + for j in xrange(1000): + img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir) + for format, dpi in formats: + if out_of_date(code_path, img.filename(format)): + all_exists = False + break + img.formats.append(format) + + # assume that if we have one, we have them all + if not all_exists: + all_exists = (j > 0) + break + images.append(img) + if not all_exists: + break + results.append((code_piece, images)) + + if all_exists: + return results + + # -- We didn't find the files, so build them + + results = [] + ns = {} + + for i, code_piece in enumerate(code_pieces): + # Clear between runs + plt.close('all') + + # Run code + run_code(code_piece, code_path, ns) + + # Collect images + images = [] + fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() + for j, figman in enumerate(fig_managers): + if len(fig_managers) == 1 and len(code_pieces) == 1: + img = ImageFile(output_base, output_dir) + else: + img = ImageFile("%s_%02d_%02d" % (output_base, i, j), + output_dir) + images.append(img) + for format, dpi in formats: + try: + figman.canvas.figure.savefig(img.filename(format), dpi=dpi) + except exceptions.BaseException, err: + raise PlotError(traceback.format_exc()) + img.formats.append(format) + + # Results + results.append((code_piece, images)) + + return results + + 
+#------------------------------------------------------------------------------ +# Relative pathnames +#------------------------------------------------------------------------------ + +try: + from os.path import relpath +except ImportError: + # Copied from Python 2.7 + if 'posix' in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + from os.path import sep, curdir, join, abspath, commonprefix, \ + pardir + + if not path: + raise ValueError("no path specified") + + start_list = abspath(start).split(sep) + path_list = abspath(path).split(sep) + + # Work out how much of the filepath is shared by start and path. + i = len(commonprefix([start_list, path_list])) + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + elif 'nt' in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + from os.path import sep, curdir, join, abspath, commonprefix, \ + pardir, splitunc + + if not path: + raise ValueError("no path specified") + start_list = abspath(start).split(sep) + path_list = abspath(path).split(sep) + if start_list[0].lower() != path_list[0].lower(): + unc_path, rest = splitunc(path) + unc_start, rest = splitunc(start) + if bool(unc_path) ^ bool(unc_start): + raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" + % (path, start)) + else: + raise ValueError("path is on drive %s, start on drive %s" + % (path_list[0], start_list[0])) + # Work out how much of the filepath is shared by start and path. + for i in range(min(len(start_list), len(path_list))): + if start_list[i].lower() != path_list[i].lower(): + break + else: + i += 1 + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + else: + raise RuntimeError("Unsupported platform (no relpath available!)") diff --git a/docs/sphinxext/setup.py b/docs/sphinxext/setup.py new file mode 100644 index 0000000000..76e3fd81bb --- /dev/null +++ b/docs/sphinxext/setup.py @@ -0,0 +1,31 @@ +from distutils.core import setup +import setuptools +import sys, os + +version = "0.4" + +setup( + name="numpydoc", + packages=["numpydoc"], + package_dir={"numpydoc": ""}, + version=version, + description="Sphinx extension to support docstrings in Numpy format", + # classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers + classifiers=["Development Status :: 3 - Alpha", + "Environment :: Plugins", + "License :: OSI Approved :: BSD License", + "Topic :: Documentation"], + keywords="sphinx numpy", + author="Pauli Virtanen and others", + author_email="pav@iki.fi", + url="http://github.com/numpy/numpy/tree/master/doc/sphinxext", + license="BSD", + zip_safe=False, + install_requires=["Sphinx >= 1.0.1"], + package_data={'numpydoc': 'tests', '': ''}, + entry_points={ + "console_scripts": [ + "autosummary_generate = numpydoc.autosummary_generate:main", + ], + }, +) diff --git a/docs/sphinxext/traitsdoc.py b/docs/sphinxext/traitsdoc.py new file mode 100644 index 0000000000..0fcf2c1cd3 --- /dev/null +++ b/docs/sphinxext/traitsdoc.py @@ -0,0 +1,140 @@ +""" +========= +traitsdoc +========= + +Sphinx extension that handles docstrings in the Numpy standard format, [1] +and support Traits [2]. + +This extension can be used as a replacement for ``numpydoc`` when support +for Traits is required. + +.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard +.. 
[2] http://code.enthought.com/projects/traits/ + +""" + +import inspect +import os +import pydoc + +import docscrape +import docscrape_sphinx +from docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString + +import numpydoc + +import comment_eater + +class SphinxTraitsDoc(SphinxClassDoc): + def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc): + if not inspect.isclass(cls): + raise ValueError("Initialise using a class. Got %r" % cls) + self._cls = cls + + if modulename and not modulename.endswith('.'): + modulename += '.' + self._mod = modulename + self._name = cls.__name__ + self._func_doc = func_doc + + docstring = pydoc.getdoc(cls) + docstring = docstring.split('\n') + + # De-indent paragraph + try: + indent = min(len(s) - len(s.lstrip()) for s in docstring + if s.strip()) + except ValueError: + indent = 0 + + for n,line in enumerate(docstring): + docstring[n] = docstring[n][indent:] + + self._doc = docscrape.Reader(docstring) + self._parsed_data = { + 'Signature': '', + 'Summary': '', + 'Description': [], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Traits': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'References': '', + 'Example': '', + 'Examples': '', + 'index': {} + } + + self._parse() + + def _str_summary(self): + return self['Summary'] + [''] + + def _str_extended_summary(self): + return self['Description'] + self['Extended Summary'] + [''] + + def __str__(self, indent=0, func_role="func"): + out = [] + out += self._str_signature() + out += self._str_index() + [''] + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Traits', 'Methods', + 'Returns','Raises'): + out += self._str_param_list(param_list) + out += self._str_see_also("obj") + out += self._str_section('Notes') + out += self._str_references() + out += self._str_section('Example') + out += self._str_section('Examples') + out = self._str_indent(out,indent) + return '\n'.join(out) + +def looks_like_issubclass(obj, classname): + """ Return True if the object has a class or superclass with the given class + name. + + Ignores old-style classes. + """ + t = obj + if t.__name__ == classname: + return True + for klass in t.__mro__: + if klass.__name__ == classname: + return True + return False + +def get_doc_object(obj, what=None, config=None): + if what is None: + if inspect.isclass(obj): + what = 'class' + elif inspect.ismodule(obj): + what = 'module' + elif callable(obj): + what = 'function' + else: + what = 'object' + if what == 'class': + doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config) + if looks_like_issubclass(obj, 'HasTraits'): + for name, trait, comment in comment_eater.get_class_traits(obj): + # Exclude private traits. + if not name.startswith('_'): + doc['Traits'].append((name, trait, comment.splitlines())) + return doc + elif what in ('function', 'method'): + return SphinxFunctionDoc(obj, '', config=config) + else: + return SphinxDocString(pydoc.getdoc(obj), config=config) + +def setup(app): + # init numpydoc + numpydoc.setup(app, get_doc_object) +
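
With all of these extensions now checked in under docs/sphinxext/, enabling them is a matter of putting that directory on sys.path and listing the wanted modules in the Sphinx conf.py. The fragment below is only a minimal sketch and is not part of this patch: it assumes sphinx-build is invoked from the docs/ directory, and it pairs numpydoc with sphinx.ext.autodoc, since numpydoc's mangling hooks are connected to the autodoc events. The option values shown are simply the defaults that the extensions themselves register.

    # docs/conf.py (sketch only, not part of this patch)
    import os
    import sys

    # Make the bundled extensions importable; assumes sphinx-build runs from docs/.
    sys.path.insert(0, os.path.abspath('sphinxext'))

    extensions = [
        'sphinx.ext.autodoc',            # numpydoc hooks into the autodoc-process-* events
        'numpydoc',                      # NumPy-format docstring handling
        'ipython_console_highlighting',  # pygments lexer for IPython console sessions
        'ipython_directive',             # re-executes embedded IPython sessions at build time
        'inheritance_diagram',           # class diagrams; requires the graphviz 'dot' executable
        'plot_directive',                # matplotlib plot:: directive
    ]

    # Defaults registered by numpydoc and plot_directive in their setup() functions.
    numpydoc_show_class_members = True
    plot_include_source = False
    plot_formats = ['png', 'hires.png', 'pdf']

For projects documenting Traits-based classes, traitsdoc can be listed in place of numpydoc; it drives the same numpydoc machinery but also collects trait definitions through comment_eater.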