Move Sphinx extensions under Numpy's SVN trunk

commit 8c542b5be4ad43fc8d4a85bda9d49343f872d105 (1 parent: 00f7011)
Authored by Pauli Virtanen (pv)
doc/Makefile (7 lines changed)
@@ -37,15 +37,12 @@ dist: html
cd build/dist && tar czf ../dist.tar.gz *
generate: build/generate-stamp
-build/generate-stamp: $(wildcard source/reference/*.rst) ext
+build/generate-stamp: $(wildcard source/reference/*.rst)
mkdir -p build
- ./ext/autosummary_generate.py source/reference/*.rst \
+ ./sphinxext/autosummary_generate.py source/reference/*.rst \
-p dump.xml -o source/reference/generated
touch build/generate-stamp
-ext:
- svn co http://sphinx.googlecode.com/svn/contrib/trunk/numpyext ext
-
html: generate
mkdir -p build/html build/doctrees
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html
doc/source/conf.py (2 lines changed)
@@ -5,7 +5,7 @@
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
-sys.path.append(os.path.abspath('../ext'))
+sys.path.append(os.path.abspath('../sphinxext'))
# Check Sphinx version
import sphinx
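
The conf.py change only adjusts the extension lookup path: the bundled Sphinx extensions now live in doc/sphinxext instead of an ext/ directory checked out separately from the Sphinx contrib repository. Below is a minimal sketch of how such a conf.py might wire the bundled extensions up; the extensions list is illustrative only and not part of this commit:

    import os, sys

    # Make the bundled extensions importable (this is the line the commit changes).
    sys.path.append(os.path.abspath('../sphinxext'))

    # Hypothetical: enable the bundled extensions by module name.
    extensions = ['autosummary', 'numpydoc', 'phantom_import',
                  'only_directives', 'plot_directive']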
doc/sphinxext/LICENSE.txt (97 lines added)
@@ -0,0 +1,97 @@
+-------------------------------------------------------------------------------
+ The files
+ - numpydoc.py
+ - autosummary.py
+ - autosummary_generate.py
+ - docscrape.py
+ - docscrape_sphinx.py
+ - phantom_import.py
+ have the following license:
+
+Copyright (C) 2008 Stefan van der Walt <stefan@mentat.za.net>, Pauli Virtanen <pav@iki.fi>
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+-------------------------------------------------------------------------------
+ The files
+ - compiler_unparse.py
+ - comment_eater.py
+ - traitsdoc.py
+ have the following license:
+
+This software is OSI Certified Open Source Software.
+OSI Certified is a certification mark of the Open Source Initiative.
+
+Copyright (c) 2006, Enthought, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Enthought, Inc. nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+-------------------------------------------------------------------------------
+ The files
+ - only_directives.py
+ - plot_directive.py
+ originate from Matplotlib (http://matplotlib.sf.net/) which has
+ the following license:
+
+Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved.
+
+1. This LICENSE AGREEMENT is between John D. Hunter (“JDH”), and the Individual or Organization (“Licensee”) accessing and otherwise using matplotlib software in source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH’s License Agreement and JDH’s notice of copyright, i.e., “Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved” are retained in matplotlib 0.98.3 alone or in any derivative version prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3.
+
+4. JDH is making matplotlib 0.98.3 available to Licensee on an “AS IS” basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement.
+
doc/sphinxext/__init__.py (new empty file)
doc/sphinxext/autosummary.py (334 lines added)
@@ -0,0 +1,334 @@
+"""
+===========
+autosummary
+===========
+
+Sphinx extension that adds an autosummary:: directive, which can be
+used to generate function/method/attribute/etc. summary lists, similar
+to those output eg. by Epydoc and other API doc generation tools.
+
+An :autolink: role is also provided.
+
+autosummary directive
+---------------------
+
+The autosummary directive has the form::
+
+ .. autosummary::
+ :nosignatures:
+ :toctree: generated/
+
+ module.function_1
+ module.function_2
+ ...
+
+and it generates an output table (containing signatures, optionally)
+
+ ======================== =============================================
+ module.function_1(args) Summary line from the docstring of function_1
+ module.function_2(args) Summary line from the docstring
+ ...
+ ======================== =============================================
+
+If the :toctree: option is specified, files matching the function names
+are inserted to the toctree with the given prefix:
+
+ generated/module.function_1
+ generated/module.function_2
+ ...
+
+Note: The file names contain the module:: or currentmodule:: prefixes.
+
+.. seealso:: autosummary_generate.py
+
+
+autolink role
+-------------
+
+The autolink role functions as ``:obj:`` when the name referred can be
+resolved to a Python object, and otherwise it becomes simple emphasis.
+This can be used as the default role to make links 'smart'.
+
+"""
+import sys, os, posixpath, re
+
+from docutils.parsers.rst import directives
+from docutils.statemachine import ViewList
+from docutils import nodes
+
+import sphinx.addnodes, sphinx.roles, sphinx.builder
+from sphinx.util import patfilter
+
+from docscrape_sphinx import get_doc_object
+
+
+def setup(app):
+ app.add_directive('autosummary', autosummary_directive, True, (0, 0, False),
+ toctree=directives.unchanged,
+ nosignatures=directives.flag)
+ app.add_role('autolink', autolink_role)
+
+ app.add_node(autosummary_toc,
+ html=(autosummary_toc_visit_html, autosummary_toc_depart_noop),
+ latex=(autosummary_toc_visit_latex, autosummary_toc_depart_noop))
+ app.connect('doctree-read', process_autosummary_toc)
+
+#------------------------------------------------------------------------------
+# autosummary_toc node
+#------------------------------------------------------------------------------
+
+class autosummary_toc(nodes.comment):
+ pass
+
+def process_autosummary_toc(app, doctree):
+ """
+ Insert items described in autosummary:: to the TOC tree, but do
+ not generate the toctree:: list.
+
+ """
+ env = app.builder.env
+ crawled = {}
+ def crawl_toc(node, depth=1):
+ crawled[node] = True
+ for j, subnode in enumerate(node):
+ try:
+ if (isinstance(subnode, autosummary_toc)
+ and isinstance(subnode[0], sphinx.addnodes.toctree)):
+ env.note_toctree(env.docname, subnode[0])
+ continue
+ except IndexError:
+ continue
+ if not isinstance(subnode, nodes.section):
+ continue
+ if subnode not in crawled:
+ crawl_toc(subnode, depth+1)
+ crawl_toc(doctree)
+
+def autosummary_toc_visit_html(self, node):
+ """Hide autosummary toctree list in HTML output"""
+ raise nodes.SkipNode
+
+def autosummary_toc_visit_latex(self, node):
+ """Show autosummary toctree (= put the referenced pages here) in Latex"""
+ pass
+
+def autosummary_toc_depart_noop(self, node):
+ pass
+
+#------------------------------------------------------------------------------
+# .. autosummary::
+#------------------------------------------------------------------------------
+
+def autosummary_directive(dirname, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ """
+ Pretty table containing short signatures and summaries of functions etc.
+
+ autosummary also generates a (hidden) toctree:: node.
+
+ """
+
+ names = []
+ names += [x.strip() for x in content if x.strip()]
+
+ table, warnings, real_names = get_autosummary(names, state,
+ 'nosignatures' in options)
+ node = table
+
+ env = state.document.settings.env
+ suffix = env.config.source_suffix
+ all_docnames = env.found_docs.copy()
+ dirname = posixpath.dirname(env.docname)
+
+ if 'toctree' in options:
+ tree_prefix = options['toctree'].strip()
+ docnames = []
+ for name in names:
+ name = real_names.get(name, name)
+
+ docname = tree_prefix + name
+ if docname.endswith(suffix):
+ docname = docname[:-len(suffix)]
+ docname = posixpath.normpath(posixpath.join(dirname, docname))
+ if docname not in env.found_docs:
+ warnings.append(state.document.reporter.warning(
+ 'toctree references unknown document %r' % docname,
+ line=lineno))
+ docnames.append(docname)
+
+ tocnode = sphinx.addnodes.toctree()
+ tocnode['includefiles'] = docnames
+ tocnode['maxdepth'] = -1
+ tocnode['glob'] = None
+
+ tocnode = autosummary_toc('', '', tocnode)
+ return warnings + [node] + [tocnode]
+ else:
+ return warnings + [node]
+
+def get_autosummary(names, state, no_signatures=False):
+ """
+ Generate a proper table node for autosummary:: directive.
+
+ Parameters
+ ----------
+ names : list of str
+ Names of Python objects to be imported and added to the table.
+ document : document
+ Docutils document object
+
+ """
+ document = state.document
+
+ real_names = {}
+ warnings = []
+
+ prefixes = ['']
+ prefixes.insert(0, document.settings.env.currmodule)
+
+ table = nodes.table('')
+ group = nodes.tgroup('', cols=2)
+ table.append(group)
+ group.append(nodes.colspec('', colwidth=30))
+ group.append(nodes.colspec('', colwidth=70))
+ body = nodes.tbody('')
+ group.append(body)
+
+ def append_row(*column_texts):
+ row = nodes.row('')
+ for text in column_texts:
+ node = nodes.paragraph('')
+ vl = ViewList()
+ vl.append(text, '<autosummary>')
+ state.nested_parse(vl, 0, node)
+ row.append(nodes.entry('', node))
+ body.append(row)
+
+ for name in names:
+ try:
+ obj, real_name = import_by_name(name, prefixes=prefixes)
+ except ImportError:
+ warnings.append(document.reporter.warning(
+ 'failed to import %s' % name))
+ append_row(":obj:`%s`" % name, "")
+ continue
+
+ real_names[name] = real_name
+
+ doc = get_doc_object(obj)
+
+ if doc['Summary']:
+ title = " ".join(doc['Summary'])
+ else:
+ title = ""
+
+ col1 = ":obj:`%s <%s>`" % (name, real_name)
+ if doc['Signature']:
+ sig = re.sub('^[a-zA-Z_0-9.-]*', '', doc['Signature'])
+ if '=' in sig:
+ # abbreviate optional arguments
+ sig = re.sub(r', ([a-zA-Z0-9_]+)=', r'[, \1=', sig, count=1)
+ sig = re.sub(r'\(([a-zA-Z0-9_]+)=', r'([\1=', sig, count=1)
+ sig = re.sub(r'=[^,)]+,', ',', sig)
+ sig = re.sub(r'=[^,)]+\)$', '])', sig)
+ # shorten long strings
+ sig = re.sub(r'(\[.{16,16}[^,)]*?),.*?\]\)', r'\1, ...])', sig)
+ else:
+ sig = re.sub(r'(\(.{16,16}[^,)]*?),.*?\)', r'\1, ...)', sig)
+ col1 += " " + sig
+ col2 = title
+ append_row(col1, col2)
+
+ return table, warnings, real_names
+
+def import_by_name(name, prefixes=[None]):
+ """
+ Import a Python object that has the given name, under one of the prefixes.
+
+ Parameters
+ ----------
+ name : str
+ Name of a Python object, eg. 'numpy.ndarray.view'
+ prefixes : list of (str or None), optional
+ Prefixes to prepend to the name (None implies no prefix).
+ The first prefixed name that results to successful import is used.
+
+ Returns
+ -------
+ obj
+ The imported object
+ name
+ Name of the imported object (useful if `prefixes` was used)
+
+ """
+ for prefix in prefixes:
+ try:
+ if prefix:
+ prefixed_name = '.'.join([prefix, name])
+ else:
+ prefixed_name = name
+ return _import_by_name(prefixed_name), prefixed_name
+ except ImportError:
+ pass
+ raise ImportError
+
+def _import_by_name(name):
+ """Import a Python object given its full name"""
+ try:
+ # try first interpret `name` as MODNAME.OBJ
+ name_parts = name.split('.')
+ try:
+ modname = '.'.join(name_parts[:-1])
+ __import__(modname)
+ return getattr(sys.modules[modname], name_parts[-1])
+ except (ImportError, IndexError, AttributeError):
+ pass
+
+ # ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ...
+ last_j = 0
+ modname = None
+ for j in reversed(range(1, len(name_parts)+1)):
+ last_j = j
+ modname = '.'.join(name_parts[:j])
+ try:
+ __import__(modname)
+ except ImportError:
+ continue
+ if modname in sys.modules:
+ break
+
+ if last_j < len(name_parts):
+ obj = sys.modules[modname]
+ for obj_name in name_parts[last_j:]:
+ obj = getattr(obj, obj_name)
+ return obj
+ else:
+ return sys.modules[modname]
+ except (ValueError, ImportError, AttributeError, KeyError), e:
+ raise ImportError(e)
+
+#------------------------------------------------------------------------------
+# :autolink: (smart default role)
+#------------------------------------------------------------------------------
+
+def autolink_role(typ, rawtext, etext, lineno, inliner,
+ options={}, content=[]):
+ """
+ Smart linking role.
+
+ Expands to ":obj:`text`" if `text` is an object that can be imported;
+ otherwise expands to "*text*".
+ """
+ r = sphinx.roles.xfileref_role('obj', rawtext, etext, lineno, inliner,
+ options, content)
+ pnode = r[0][0]
+
+ prefixes = [None]
+ #prefixes.insert(0, inliner.document.settings.env.currmodule)
+ try:
+ obj, name = import_by_name(pnode['reftarget'], prefixes)
+ except ImportError:
+ content = pnode[0]
+ r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
+ classes=content['classes'])
+ return r
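
Besides the directive and the :autolink: role, autosummary.py exposes import_by_name, which autosummary_generate.py reuses to locate objects. A small sketch of how it resolves a dotted name under a list of prefixes; this assumes doc/sphinxext is on sys.path and NumPy is importable, and is not part of the commit:

    from autosummary import import_by_name

    # Tries 'numpy.ndarray.view' first, then the bare name 'ndarray.view'.
    obj, real_name = import_by_name('ndarray.view', prefixes=['numpy', None])
    print real_name    # numpy.ndarray.view
    print obj          # <method 'view' of 'numpy.ndarray' objects>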
doc/sphinxext/autosummary_generate.py (189 lines added)
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+r"""
+autosummary_generate.py OPTIONS FILES
+
+Generate automatic RST source files for items referred to in
+autosummary:: directives.
+
+Each generated RST file contains a single auto*:: directive which
+extracts the docstring of the referred item.
+
+Example Makefile rule::
+
+ generate:
+ ./ext/autosummary_generate.py -o source/generated source/*.rst
+
+"""
+import glob, re, inspect, os, optparse
+from autosummary import import_by_name
+
+try:
+ from phantom_import import import_phantom_module
+except ImportError:
+ import_phantom_module = lambda x: x
+
+def main():
+ p = optparse.OptionParser(__doc__.strip())
+ p.add_option("-p", "--phantom", action="store", type="string",
+ dest="phantom", default=None,
+ help="Phantom import modules from a file")
+ p.add_option("-o", "--output-dir", action="store", type="string",
+ dest="output_dir", default=None,
+ help=("Write all output files to the given directory (instead "
+ "of writing them as specified in the autosummary:: "
+ "directives)"))
+ options, args = p.parse_args()
+
+ if len(args) == 0:
+ p.error("wrong number of arguments")
+
+ if options.phantom and os.path.isfile(options.phantom):
+ import_phantom_module(options.phantom)
+
+ # read
+ names = {}
+ for name, loc in get_documented(args).items():
+ for (filename, sec_title, keyword, toctree) in loc:
+ if toctree is not None:
+ path = os.path.join(os.path.dirname(filename), toctree)
+ names[name] = os.path.abspath(path)
+
+ # write
+ for name, path in sorted(names.items()):
+ if options.output_dir is not None:
+ path = options.output_dir
+
+ if not os.path.isdir(path):
+ os.makedirs(path)
+
+ try:
+ obj, name = import_by_name(name)
+ except ImportError, e:
+ print "Failed to import '%s': %s" % (name, e)
+ continue
+
+ fn = os.path.join(path, '%s.rst' % name)
+
+ if os.path.exists(fn):
+ # skip
+ continue
+
+ f = open(fn, 'w')
+
+ try:
+ f.write('%s\n%s\n\n' % (name, '='*len(name)))
+
+ if inspect.isclass(obj):
+ if issubclass(obj, Exception):
+ f.write(format_modulemember(name, 'autoexception'))
+ else:
+ f.write(format_modulemember(name, 'autoclass'))
+ elif inspect.ismodule(obj):
+ f.write(format_modulemember(name, 'automodule'))
+ elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj):
+ f.write(format_classmember(name, 'automethod'))
+ elif callable(obj):
+ f.write(format_modulemember(name, 'autofunction'))
+ elif hasattr(obj, '__get__'):
+ f.write(format_classmember(name, 'autoattribute'))
+ else:
+ f.write(format_modulemember(name, 'autofunction'))
+ finally:
+ f.close()
+
+def format_modulemember(name, directive):
+ parts = name.split('.')
+ mod, name = '.'.join(parts[:-1]), parts[-1]
+ return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name)
+
+def format_classmember(name, directive):
+ parts = name.split('.')
+ mod, name = '.'.join(parts[:-2]), '.'.join(parts[-2:])
+ return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name)
+
+def get_documented(filenames):
+ """
+ Find out what items are documented in source/*.rst
+
+ Returns
+ -------
+ documented : dict of list of (filename, title, keyword, toctree)
+ Dictionary whose keys are documented names of objects.
+ The value is a list of locations where the object was documented.
+ Each location is a tuple of filename, the current section title,
+ the name of the directive, and the value of the :toctree: argument
+ (if present) of the directive.
+
+ """
+ title_underline_re = re.compile("^[-=*_^#]{3,}\s*$")
+ autodoc_re = re.compile(".. auto(function|method|attribute|class|exception|module)::\s*([A-Za-z0-9_.]+)\s*$")
+ autosummary_re = re.compile(r'^\.\.\s+autosummary::\s*')
+ module_re = re.compile(r'^\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$')
+ autosummary_item_re = re.compile(r'^\s+([_a-zA-Z][a-zA-Z0-9_.]*)\s*')
+ toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
+
+ documented = {}
+
+ for filename in filenames:
+ current_title = []
+ last_line = None
+ toctree = None
+ current_module = None
+ in_autosummary = False
+
+ f = open(filename, 'r')
+ for line in f:
+ try:
+ if in_autosummary:
+ m = toctree_arg_re.match(line)
+ if m:
+ toctree = m.group(1)
+ continue
+
+ if line.strip().startswith(':'):
+ continue # skip options
+
+ m = autosummary_item_re.match(line)
+ if m:
+ name = m.group(1).strip()
+ if current_module and not name.startswith(current_module + '.'):
+ name = "%s.%s" % (current_module, name)
+ documented.setdefault(name, []).append(
+ (filename, current_title, 'autosummary', toctree))
+ continue
+ if line.strip() == '':
+ continue
+ in_autosummary = False
+
+ m = autosummary_re.match(line)
+ if m:
+ in_autosummary = True
+ continue
+
+ m = autodoc_re.search(line)
+ if m:
+ name = m.group(2).strip()
+ if current_module and not name.startswith(current_module + '.'):
+ name = "%s.%s" % (current_module, name)
+ if m.group(1) == "module":
+ current_module = name
+ documented.setdefault(name, []).append(
+ (filename, current_title, "auto" + m.group(1), None))
+ continue
+
+ m = title_underline_re.match(line)
+ if m and last_line:
+ current_title = last_line.strip()
+ continue
+
+ m = module_re.match(line)
+ if m:
+ current_module = m.group(2)
+ continue
+ finally:
+ last_line = line
+
+ return documented
+
+if __name__ == "__main__":
+ main()
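
Each generated stub file is just a title plus a single auto*:: directive, built by format_modulemember or format_classmember. A quick sketch of the RST they emit (assumes Sphinx and docutils are installed, since importing this module pulls in autosummary):

    from autosummary_generate import format_modulemember, format_classmember

    print format_modulemember('numpy.dot', 'autofunction')
    # .. currentmodule:: numpy
    #
    # .. autofunction:: dot

    print format_classmember('numpy.ndarray.view', 'automethod')
    # .. currentmodule:: numpy
    #
    # .. automethod:: ndarray.view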
doc/sphinxext/comment_eater.py (158 lines added)
@@ -0,0 +1,158 @@
+from cStringIO import StringIO
+import compiler
+import inspect
+import textwrap
+import tokenize
+
+from compiler_unparse import unparse
+
+
+class Comment(object):
+ """ A comment block.
+ """
+ is_comment = True
+ def __init__(self, start_lineno, end_lineno, text):
+ # int : The first line number in the block. 1-indexed.
+ self.start_lineno = start_lineno
+ # int : The last line number. Inclusive!
+ self.end_lineno = end_lineno
+ # str : The text block including '#' character but not any leading spaces.
+ self.text = text
+
+ def add(self, string, start, end, line):
+ """ Add a new comment line.
+ """
+ self.start_lineno = min(self.start_lineno, start[0])
+ self.end_lineno = max(self.end_lineno, end[0])
+ self.text += string
+
+ def __repr__(self):
+ return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
+ self.end_lineno, self.text)
+
+
+class NonComment(object):
+ """ A non-comment block of code.
+ """
+ is_comment = False
+ def __init__(self, start_lineno, end_lineno):
+ self.start_lineno = start_lineno
+ self.end_lineno = end_lineno
+
+ def add(self, string, start, end, line):
+ """ Add lines to the block.
+ """
+ if string.strip():
+ # Only add if not entirely whitespace.
+ self.start_lineno = min(self.start_lineno, start[0])
+ self.end_lineno = max(self.end_lineno, end[0])
+
+ def __repr__(self):
+ return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
+ self.end_lineno)
+
+
+class CommentBlocker(object):
+ """ Pull out contiguous comment blocks.
+ """
+ def __init__(self):
+ # Start with a dummy.
+ self.current_block = NonComment(0, 0)
+
+ # All of the blocks seen so far.
+ self.blocks = []
+
+ # The index mapping lines of code to their associated comment blocks.
+ self.index = {}
+
+ def process_file(self, file):
+ """ Process a file object.
+ """
+ for token in tokenize.generate_tokens(file.next):
+ self.process_token(*token)
+ self.make_index()
+
+ def process_token(self, kind, string, start, end, line):
+ """ Process a single token.
+ """
+ if self.current_block.is_comment:
+ if kind == tokenize.COMMENT:
+ self.current_block.add(string, start, end, line)
+ else:
+ self.new_noncomment(start[0], end[0])
+ else:
+ if kind == tokenize.COMMENT:
+ self.new_comment(string, start, end, line)
+ else:
+ self.current_block.add(string, start, end, line)
+
+ def new_noncomment(self, start_lineno, end_lineno):
+ """ We are transitioning from a noncomment to a comment.
+ """
+ block = NonComment(start_lineno, end_lineno)
+ self.blocks.append(block)
+ self.current_block = block
+
+ def new_comment(self, string, start, end, line):
+ """ Possibly add a new comment.
+
+ Only adds a new comment if this comment is the only thing on the line.
+ Otherwise, it extends the noncomment block.
+ """
+ prefix = line[:start[1]]
+ if prefix.strip():
+ # Oops! Trailing comment, not a comment block.
+ self.current_block.add(string, start, end, line)
+ else:
+ # A comment block.
+ block = Comment(start[0], end[0], string)
+ self.blocks.append(block)
+ self.current_block = block
+
+ def make_index(self):
+ """ Make the index mapping lines of actual code to their associated
+ prefix comments.
+ """
+ for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
+ if not block.is_comment:
+ self.index[block.start_lineno] = prev
+
+ def search_for_comment(self, lineno, default=None):
+ """ Find the comment block just before the given line number.
+
+ Returns None (or the specified default) if there is no such block.
+ """
+ if not self.index:
+ self.make_index()
+ block = self.index.get(lineno, None)
+ text = getattr(block, 'text', default)
+ return text
+
+
+def strip_comment_marker(text):
+ """ Strip # markers at the front of a block of comment text.
+ """
+ lines = []
+ for line in text.splitlines():
+ lines.append(line.lstrip('#'))
+ text = textwrap.dedent('\n'.join(lines))
+ return text
+
+
+def get_class_traits(klass):
+ """ Yield all of the documentation for trait definitions on a class object.
+ """
+ # FIXME: gracefully handle errors here or in the caller?
+ source = inspect.getsource(klass)
+ cb = CommentBlocker()
+ cb.process_file(StringIO(source))
+ mod_ast = compiler.parse(source)
+ class_ast = mod_ast.node.nodes[0]
+ for node in class_ast.code.nodes:
+ # FIXME: handle other kinds of assignments?
+ if isinstance(node, compiler.ast.Assign):
+ name = node.nodes[0].name
+ rhs = unparse(node.expr).strip()
+ doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
+ yield name, rhs, doc
+
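comment_eater pairs blocks of '#' comments with the code that follows them, which traitsdoc uses to document trait definitions. A minimal usage sketch with an assumed input string (not from the commit):

    from cStringIO import StringIO
    from comment_eater import CommentBlocker, strip_comment_marker

    source = (
        "# Default number of retries.\n"
        "retries = 3\n"
    )
    cb = CommentBlocker()
    cb.process_file(StringIO(source))
    for block in cb.blocks:
        print block    # Comment(...) and NonComment(...) reprs

    # Drop the leading '#' markers and dedent what remains.
    print strip_comment_marker("# Default number of retries.")
    # -> Default number of retries.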
doc/sphinxext/compiler_unparse.py (860 lines added)
@@ -0,0 +1,860 @@
+""" Turn compiler.ast structures back into executable python code.
+
+ The unparse method takes a compiler.ast tree and transforms it back into
+ valid python code. It is incomplete and currently only works for
+ import statements, function calls, function definitions, assignments, and
+ basic expressions.
+
+ Inspired by python-2.5-svn/Demo/parser/unparse.py
+
+ fixme: We may want to move to using _ast trees because the compiler for
+ them is about 6 times faster than compiler.compile.
+"""
+
+import sys
+import cStringIO
+from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
+
+def unparse(ast, single_line_functions=False):
+ s = cStringIO.StringIO()
+ UnparseCompilerAst(ast, s, single_line_functions)
+ return s.getvalue().lstrip()
+
+op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
+ 'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
+
+class UnparseCompilerAst:
+ """ Methods in this class recursively traverse an AST and
+ output source code for the abstract syntax; original formatting
+ is disregarged.
+ """
+
+ #########################################################################
+ # object interface.
+ #########################################################################
+
+ def __init__(self, tree, file = sys.stdout, single_line_functions=False):
+ """ Unparser(tree, file=sys.stdout) -> None.
+
+ Print the source for tree to file.
+ """
+ self.f = file
+ self._single_func = single_line_functions
+ self._do_indent = True
+ self._indent = 0
+ self._dispatch(tree)
+ self._write("\n")
+ self.f.flush()
+
+ #########################################################################
+ # Unparser private interface.
+ #########################################################################
+
+ ### format, output, and dispatch methods ################################
+
+ def _fill(self, text = ""):
+ "Indent a piece of text, according to the current indentation level"
+ if self._do_indent:
+ self._write("\n"+" "*self._indent + text)
+ else:
+ self._write(text)
+
+ def _write(self, text):
+ "Append a piece of text to the current line."
+ self.f.write(text)
+
+ def _enter(self):
+ "Print ':', and increase the indentation."
+ self._write(": ")
+ self._indent += 1
+
+ def _leave(self):
+ "Decrease the indentation level."
+ self._indent -= 1
+
+ def _dispatch(self, tree):
+ "_dispatcher function, _dispatching tree type T to method _T."
+ if isinstance(tree, list):
+ for t in tree:
+ self._dispatch(t)
+ return
+ meth = getattr(self, "_"+tree.__class__.__name__)
+ if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
+ return
+ meth(tree)
+
+
+ #########################################################################
+ # compiler.ast unparsing methods.
+ #
+ # There should be one method per concrete grammar type. They are
+ # organized in alphabetical order.
+ #########################################################################
+
+ def _Add(self, t):
+ self.__binary_op(t, '+')
+
+ def _And(self, t):
+ self._write(" (")
+ for i, node in enumerate(t.nodes):
+ self._dispatch(node)
+ if i != len(t.nodes)-1:
+ self._write(") and (")
+ self._write(")")
+
+ def _AssAttr(self, t):
+ """ Handle assigning an attribute of an object
+ """
+ self._dispatch(t.expr)
+ self._write('.'+t.attrname)
+
+ def _Assign(self, t):
+ """ Expression Assignment such as "a = 1".
+
+ This only handles assignment in expressions. Keyword assignment
+ is handled separately.
+ """
+ self._fill()
+ for target in t.nodes:
+ self._dispatch(target)
+ self._write(" = ")
+ self._dispatch(t.expr)
+ if not self._do_indent:
+ self._write('; ')
+
+ def _AssName(self, t):
+ """ Name on left hand side of expression.
+
+ Treat just like a name on the right side of an expression.
+ """
+ self._Name(t)
+
+ def _AssTuple(self, t):
+ """ Tuple on left hand side of an expression.
+ """
+
+ # _write each elements, separated by a comma.
+ for element in t.nodes[:-1]:
+ self._dispatch(element)
+ self._write(", ")
+
+ # Handle the last one without writing comma
+ last_element = t.nodes[-1]
+ self._dispatch(last_element)
+
+ def _AugAssign(self, t):
+ """ +=,-=,*=,/=,**=, etc. operations
+ """
+
+ self._fill()
+ self._dispatch(t.node)
+ self._write(' '+t.op+' ')
+ self._dispatch(t.expr)
+ if not self._do_indent:
+ self._write(';')
+
+ def _Bitand(self, t):
+ """ Bit and operation.
+ """
+
+ for i, node in enumerate(t.nodes):
+ self._write("(")
+ self._dispatch(node)
+ self._write(")")
+ if i != len(t.nodes)-1:
+ self._write(" & ")
+
+ def _Bitor(self, t):
+ """ Bit or operation
+ """
+
+ for i, node in enumerate(t.nodes):
+ self._write("(")
+ self._dispatch(node)
+ self._write(")")
+ if i != len(t.nodes)-1:
+ self._write(" | ")
+
+ def _CallFunc(self, t):
+ """ Function call.
+ """
+ self._dispatch(t.node)
+ self._write("(")
+ comma = False
+ for e in t.args:
+ if comma: self._write(", ")
+ else: comma = True
+ self._dispatch(e)
+ if t.star_args:
+ if comma: self._write(", ")
+ else: comma = True
+ self._write("*")
+ self._dispatch(t.star_args)
+ if t.dstar_args:
+ if comma: self._write(", ")
+ else: comma = True
+ self._write("**")
+ self._dispatch(t.dstar_args)
+ self._write(")")
+
+ def _Compare(self, t):
+ self._dispatch(t.expr)
+ for op, expr in t.ops:
+ self._write(" " + op + " ")
+ self._dispatch(expr)
+
+ def _Const(self, t):
+ """ A constant value such as an integer value, 3, or a string, "hello".
+ """
+ self._dispatch(t.value)
+
+ def _Decorators(self, t):
+ """ Handle function decorators (eg. @has_units)
+ """
+ for node in t.nodes:
+ self._dispatch(node)
+
+ def _Dict(self, t):
+ self._write("{")
+ for i, (k, v) in enumerate(t.items):
+ self._dispatch(k)
+ self._write(": ")
+ self._dispatch(v)
+ if i < len(t.items)-1:
+ self._write(", ")
+ self._write("}")
+
+ def _Discard(self, t):
+ """ Node for when return value is ignored such as in "foo(a)".
+ """
+ self._fill()
+ self._dispatch(t.expr)
+
+ def _Div(self, t):
+ self.__binary_op(t, '/')
+
+ def _Ellipsis(self, t):
+ self._write("...")
+
+ def _From(self, t):
+ """ Handle "from xyz import foo, bar as baz".
+ """
+ # fixme: Are From and ImportFrom handled differently?
+ self._fill("from ")
+ self._write(t.modname)
+ self._write(" import ")
+ for i, (name,asname) in enumerate(t.names):
+ if i != 0:
+ self._write(", ")
+ self._write(name)
+ if asname is not None:
+ self._write(" as "+asname)
+
+ def _Function(self, t):
+ """ Handle function definitions
+ """
+ if t.decorators is not None:
+ self._fill("@")
+ self._dispatch(t.decorators)
+ self._fill("def "+t.name + "(")
+ defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
+ for i, arg in enumerate(zip(t.argnames, defaults)):
+ self._write(arg[0])
+ if arg[1] is not None:
+ self._write('=')
+ self._dispatch(arg[1])
+ if i < len(t.argnames)-1:
+ self._write(', ')
+ self._write(")")
+ if self._single_func:
+ self._do_indent = False
+ self._enter()
+ self._dispatch(t.code)
+ self._leave()
+ self._do_indent = True
+
+ def _Getattr(self, t):
+ """ Handle getting an attribute of an object
+ """
+ if isinstance(t.expr, (Div, Mul, Sub, Add)):
+ self._write('(')
+ self._dispatch(t.expr)
+ self._write(')')
+ else:
+ self._dispatch(t.expr)
+
+ self._write('.'+t.attrname)
+
+ def _If(self, t):
+ self._fill()
+
+ for i, (compare,code) in enumerate(t.tests):
+ if i == 0:
+ self._write("if ")
+ else:
+ self._write("elif ")
+ self._dispatch(compare)
+ self._enter()
+ self._fill()
+ self._dispatch(code)
+ self._leave()
+ self._write("\n")
+
+ if t.else_ is not None:
+ self._write("else")
+ self._enter()
+ self._fill()
+ self._dispatch(t.else_)
+ self._leave()
+ self._write("\n")
+
+ def _IfExp(self, t):
+ self._dispatch(t.then)
+ self._write(" if ")
+ self._dispatch(t.test)
+
+ if t.else_ is not None:
+ self._write(" else (")
+ self._dispatch(t.else_)
+ self._write(")")
+
+ def _Import(self, t):
+ """ Handle "import xyz.foo".
+ """
+ self._fill("import ")
+
+ for i, (name,asname) in enumerate(t.names):
+ if i != 0:
+ self._write(", ")
+ self._write(name)
+ if asname is not None:
+ self._write(" as "+asname)
+
+ def _Keyword(self, t):
+ """ Keyword value assignment within function calls and definitions.
+ """
+ self._write(t.name)
+ self._write("=")
+ self._dispatch(t.expr)
+
+ def _List(self, t):
+ self._write("[")
+ for i,node in enumerate(t.nodes):
+ self._dispatch(node)
+ if i < len(t.nodes)-1:
+ self._write(", ")
+ self._write("]")
+
+ def _Module(self, t):
+ if t.doc is not None:
+ self._dispatch(t.doc)
+ self._dispatch(t.node)
+
+ def _Mul(self, t):
+ self.__binary_op(t, '*')
+
+ def _Name(self, t):
+ self._write(t.name)
+
+ def _NoneType(self, t):
+ self._write("None")
+
+ def _Not(self, t):
+ self._write('not (')
+ self._dispatch(t.expr)
+ self._write(')')
+
+ def _Or(self, t):
+ self._write(" (")
+ for i, node in enumerate(t.nodes):
+ self._dispatch(node)
+ if i != len(t.nodes)-1:
+ self._write(") or (")
+ self._write(")")
+
+ def _Pass(self, t):
+ self._write("pass\n")
+
+ def _Printnl(self, t):
+ self._fill("print ")
+ if t.dest:
+ self._write(">> ")
+ self._dispatch(t.dest)
+ self._write(", ")
+ comma = False
+ for node in t.nodes:
+ if comma: self._write(', ')
+ else: comma = True
+ self._dispatch(node)
+
+ def _Power(self, t):
+ self.__binary_op(t, '**')
+
+ def _Return(self, t):
+ self._fill("return ")
+ if t.value:
+ if isinstance(t.value, Tuple):
+ text = ', '.join([ name.name for name in t.value.asList() ])
+ self._write(text)
+ else:
+ self._dispatch(t.value)
+ if not self._do_indent:
+ self._write('; ')
+
+ def _Slice(self, t):
+ self._dispatch(t.expr)
+ self._write("[")
+ if t.lower:
+ self._dispatch(t.lower)
+ self._write(":")
+ if t.upper:
+ self._dispatch(t.upper)
+ #if t.step:
+ # self._write(":")
+ # self._dispatch(t.step)
+ self._write("]")
+
+ def _Sliceobj(self, t):
+ for i, node in enumerate(t.nodes):
+ if i != 0:
+ self._write(":")
+ if not (isinstance(node, Const) and node.value is None):
+ self._dispatch(node)
+
+ def _Stmt(self, tree):
+ for node in tree.nodes:
+ self._dispatch(node)
+
+ def _Sub(self, t):
+ self.__binary_op(t, '-')
+
+ def _Subscript(self, t):
+ self._dispatch(t.expr)
+ self._write("[")
+ for i, value in enumerate(t.subs):
+ if i != 0:
+ self._write(",")
+ self._dispatch(value)
+ self._write("]")
+
+ def _TryExcept(self, t):
+ self._fill("try")
+ self._enter()
+ self._dispatch(t.body)
+ self._leave()
+
+ for handler in t.handlers:
+ self._fill('except ')
+ self._dispatch(handler[0])
+ if handler[1] is not None:
+ self._write(', ')
+ self._dispatch(handler[1])
+ self._enter()
+ self._dispatch(handler[2])
+ self._leave()
+
+ if t.else_:
+ self._fill("else")
+ self._enter()
+ self._dispatch(t.else_)
+ self._leave()
+
+ def _Tuple(self, t):
+
+ if not t.nodes:
+ # Empty tuple.
+ self._write("()")
+ else:
+ self._write("(")
+
+ # _write each elements, separated by a comma.
+ for element in t.nodes[:-1]:
+ self._dispatch(element)
+ self._write(", ")
+
+ # Handle the last one without writing comma
+ last_element = t.nodes[-1]
+ self._dispatch(last_element)
+
+ self._write(")")
+
+ def _UnaryAdd(self, t):
+ self._write("+")
+ self._dispatch(t.expr)
+
+ def _UnarySub(self, t):
+ self._write("-")
+ self._dispatch(t.expr)
+
+ def _With(self, t):
+ self._fill('with ')
+ self._dispatch(t.expr)
+ if t.vars:
+ self._write(' as ')
+ self._dispatch(t.vars.name)
+ self._enter()
+ self._dispatch(t.body)
+ self._leave()
+ self._write('\n')
+
+ def _int(self, t):
+ self._write(repr(t))
+
+ def __binary_op(self, t, symbol):
+ # Check if parenthesis are needed on left side and then dispatch
+ has_paren = False
+ left_class = str(t.left.__class__)
+ if (left_class in op_precedence.keys() and
+ op_precedence[left_class] < op_precedence[str(t.__class__)]):
+ has_paren = True
+ if has_paren:
+ self._write('(')
+ self._dispatch(t.left)
+ if has_paren:
+ self._write(')')
+ # Write the appropriate symbol for operator
+ self._write(symbol)
+ # Check if parenthesis are needed on the right side and then dispatch
+ has_paren = False
+ right_class = str(t.right.__class__)
+ if (right_class in op_precedence.keys() and
+ op_precedence[right_class] < op_precedence[str(t.__class__)]):
+ has_paren = True
+ if has_paren:
+ self._write('(')
+ self._dispatch(t.right)
+ if has_paren:
+ self._write(')')
+
+ def _float(self, t):
+ # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
+ # We prefer str here.
+ self._write(str(t))
+
+ def _str(self, t):
+ self._write(repr(t))
+
+ def _tuple(self, t):
+ self._write(str(t))
+
+ #########################################################################
+ # These are the methods from the _ast modules unparse.
+ #
+ # As our needs to handle more advanced code increase, we may want to
+ # modify some of the methods below so that they work for compiler.ast.
+ #########################################################################
+
+# # stmt
+# def _Expr(self, tree):
+# self._fill()
+# self._dispatch(tree.value)
+#
+# def _Import(self, t):
+# self._fill("import ")
+# first = True
+# for a in t.names:
+# if first:
+# first = False
+# else:
+# self._write(", ")
+# self._write(a.name)
+# if a.asname:
+# self._write(" as "+a.asname)
+#
+## def _ImportFrom(self, t):
+## self._fill("from ")
+## self._write(t.module)
+## self._write(" import ")
+## for i, a in enumerate(t.names):
+## if i == 0:
+## self._write(", ")
+## self._write(a.name)
+## if a.asname:
+## self._write(" as "+a.asname)
+## # XXX(jpe) what is level for?
+##
+#
+# def _Break(self, t):
+# self._fill("break")
+#
+# def _Continue(self, t):
+# self._fill("continue")
+#
+# def _Delete(self, t):
+# self._fill("del ")
+# self._dispatch(t.targets)
+#
+# def _Assert(self, t):
+# self._fill("assert ")
+# self._dispatch(t.test)
+# if t.msg:
+# self._write(", ")
+# self._dispatch(t.msg)
+#
+# def _Exec(self, t):
+# self._fill("exec ")
+# self._dispatch(t.body)
+# if t.globals:
+# self._write(" in ")
+# self._dispatch(t.globals)
+# if t.locals:
+# self._write(", ")
+# self._dispatch(t.locals)
+#
+# def _Print(self, t):
+# self._fill("print ")
+# do_comma = False
+# if t.dest:
+# self._write(">>")
+# self._dispatch(t.dest)
+# do_comma = True
+# for e in t.values:
+# if do_comma:self._write(", ")
+# else:do_comma=True
+# self._dispatch(e)
+# if not t.nl:
+# self._write(",")
+#
+# def _Global(self, t):
+# self._fill("global")
+# for i, n in enumerate(t.names):
+# if i != 0:
+# self._write(",")
+# self._write(" " + n)
+#
+# def _Yield(self, t):
+# self._fill("yield")
+# if t.value:
+# self._write(" (")
+# self._dispatch(t.value)
+# self._write(")")
+#
+# def _Raise(self, t):
+# self._fill('raise ')
+# if t.type:
+# self._dispatch(t.type)
+# if t.inst:
+# self._write(", ")
+# self._dispatch(t.inst)
+# if t.tback:
+# self._write(", ")
+# self._dispatch(t.tback)
+#
+#
+# def _TryFinally(self, t):
+# self._fill("try")
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+#
+# self._fill("finally")
+# self._enter()
+# self._dispatch(t.finalbody)
+# self._leave()
+#
+# def _excepthandler(self, t):
+# self._fill("except ")
+# if t.type:
+# self._dispatch(t.type)
+# if t.name:
+# self._write(", ")
+# self._dispatch(t.name)
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+#
+# def _ClassDef(self, t):
+# self._write("\n")
+# self._fill("class "+t.name)
+# if t.bases:
+# self._write("(")
+# for a in t.bases:
+# self._dispatch(a)
+# self._write(", ")
+# self._write(")")
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+#
+# def _FunctionDef(self, t):
+# self._write("\n")
+# for deco in t.decorators:
+# self._fill("@")
+# self._dispatch(deco)
+# self._fill("def "+t.name + "(")
+# self._dispatch(t.args)
+# self._write(")")
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+#
+# def _For(self, t):
+# self._fill("for ")
+# self._dispatch(t.target)
+# self._write(" in ")
+# self._dispatch(t.iter)
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+# if t.orelse:
+# self._fill("else")
+# self._enter()
+# self._dispatch(t.orelse)
+# self._leave
+#
+# def _While(self, t):
+# self._fill("while ")
+# self._dispatch(t.test)
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+# if t.orelse:
+# self._fill("else")
+# self._enter()
+# self._dispatch(t.orelse)
+# self._leave
+#
+# # expr
+# def _Str(self, tree):
+# self._write(repr(tree.s))
+##
+# def _Repr(self, t):
+# self._write("`")
+# self._dispatch(t.value)
+# self._write("`")
+#
+# def _Num(self, t):
+# self._write(repr(t.n))
+#
+# def _ListComp(self, t):
+# self._write("[")
+# self._dispatch(t.elt)
+# for gen in t.generators:
+# self._dispatch(gen)
+# self._write("]")
+#
+# def _GeneratorExp(self, t):
+# self._write("(")
+# self._dispatch(t.elt)
+# for gen in t.generators:
+# self._dispatch(gen)
+# self._write(")")
+#
+# def _comprehension(self, t):
+# self._write(" for ")
+# self._dispatch(t.target)
+# self._write(" in ")
+# self._dispatch(t.iter)
+# for if_clause in t.ifs:
+# self._write(" if ")
+# self._dispatch(if_clause)
+#
+# def _IfExp(self, t):
+# self._dispatch(t.body)
+# self._write(" if ")
+# self._dispatch(t.test)
+# if t.orelse:
+# self._write(" else ")
+# self._dispatch(t.orelse)
+#
+# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
+# def _UnaryOp(self, t):
+# self._write(self.unop[t.op.__class__.__name__])
+# self._write("(")
+# self._dispatch(t.operand)
+# self._write(")")
+#
+# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
+# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
+# "FloorDiv":"//", "Pow": "**"}
+# def _BinOp(self, t):
+# self._write("(")
+# self._dispatch(t.left)
+# self._write(")" + self.binop[t.op.__class__.__name__] + "(")
+# self._dispatch(t.right)
+# self._write(")")
+#
+# boolops = {_ast.And: 'and', _ast.Or: 'or'}
+# def _BoolOp(self, t):
+# self._write("(")
+# self._dispatch(t.values[0])
+# for v in t.values[1:]:
+# self._write(" %s " % self.boolops[t.op.__class__])
+# self._dispatch(v)
+# self._write(")")
+#
+# def _Attribute(self,t):
+# self._dispatch(t.value)
+# self._write(".")
+# self._write(t.attr)
+#
+## def _Call(self, t):
+## self._dispatch(t.func)
+## self._write("(")
+## comma = False
+## for e in t.args:
+## if comma: self._write(", ")
+## else: comma = True
+## self._dispatch(e)
+## for e in t.keywords:
+## if comma: self._write(", ")
+## else: comma = True
+## self._dispatch(e)
+## if t.starargs:
+## if comma: self._write(", ")
+## else: comma = True
+## self._write("*")
+## self._dispatch(t.starargs)
+## if t.kwargs:
+## if comma: self._write(", ")
+## else: comma = True
+## self._write("**")
+## self._dispatch(t.kwargs)
+## self._write(")")
+#
+# # slice
+# def _Index(self, t):
+# self._dispatch(t.value)
+#
+# def _ExtSlice(self, t):
+# for i, d in enumerate(t.dims):
+# if i != 0:
+# self._write(': ')
+# self._dispatch(d)
+#
+# # others
+# def _arguments(self, t):
+# first = True
+# nonDef = len(t.args)-len(t.defaults)
+# for a in t.args[0:nonDef]:
+# if first:first = False
+# else: self._write(", ")
+# self._dispatch(a)
+# for a,d in zip(t.args[nonDef:], t.defaults):
+# if first:first = False
+# else: self._write(", ")
+# self._dispatch(a),
+# self._write("=")
+# self._dispatch(d)
+# if t.vararg:
+# if first:first = False
+# else: self._write(", ")
+# self._write("*"+t.vararg)
+# if t.kwarg:
+# if first:first = False
+# else: self._write(", ")
+# self._write("**"+t.kwarg)
+#
+## def _keyword(self, t):
+## self._write(t.arg)
+## self._write("=")
+## self._dispatch(t.value)
+#
+# def _Lambda(self, t):
+# self._write("lambda ")
+# self._dispatch(t.args)
+# self._write(": ")
+# self._dispatch(t.body)
+
+
+
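compiler_unparse is what comment_eater uses to turn the right-hand side of an assignment back into source text. A tiny round-trip sketch using the Python 2 compiler module, as in the rest of these files:

    import compiler
    from compiler_unparse import unparse

    tree = compiler.parse("y = a*x + b")
    print unparse(tree)    # prints: y = a*x+b  (original spacing is not preserved)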
doc/sphinxext/docscrape.py (492 lines added)
@@ -0,0 +1,492 @@
+"""Extract reference documentation from the NumPy source tree.
+
+"""
+
+import inspect
+import textwrap
+import re
+import pydoc
+from StringIO import StringIO
+from warnings import warn
+4
+class Reader(object):
+ """A line-based string reader.
+
+ """
+ def __init__(self, data):
+ """
+ Parameters
+ ----------
+ data : str
+ String with lines separated by '\n'.
+
+ """
+ if isinstance(data,list):
+ self._str = data
+ else:
+ self._str = data.split('\n') # store string as list of lines
+
+ self.reset()
+
+ def __getitem__(self, n):
+ return self._str[n]
+
+ def reset(self):
+ self._l = 0 # current line nr
+
+ def read(self):
+ if not self.eof():
+ out = self[self._l]
+ self._l += 1
+ return out
+ else:
+ return ''
+
+ def seek_next_non_empty_line(self):
+ for l in self[self._l:]:
+ if l.strip():
+ break
+ else:
+ self._l += 1
+
+ def eof(self):
+ return self._l >= len(self._str)
+
+ def read_to_condition(self, condition_func):
+ start = self._l
+ for line in self[start:]:
+ if condition_func(line):
+ return self[start:self._l]
+ self._l += 1
+ if self.eof():
+ return self[start:self._l+1]
+ return []
+
+ def read_to_next_empty_line(self):
+ self.seek_next_non_empty_line()
+ def is_empty(line):
+ return not line.strip()
+ return self.read_to_condition(is_empty)
+
+ def read_to_next_unindented_line(self):
+ def is_unindented(line):
+ return (line.strip() and (len(line.lstrip()) == len(line)))
+ return self.read_to_condition(is_unindented)
+
+ def peek(self,n=0):
+ if self._l + n < len(self._str):
+ return self[self._l + n]
+ else:
+ return ''
+
+ def is_empty(self):
+ return not ''.join(self._str).strip()
+
+
+class NumpyDocString(object):
+ def __init__(self,docstring):
+ docstring = textwrap.dedent(docstring).split('\n')
+
+ self._doc = Reader(docstring)
+ self._parsed_data = {
+ 'Signature': '',
+ 'Summary': [''],
+ 'Extended Summary': [],
+ 'Parameters': [],
+ 'Returns': [],
+ 'Raises': [],
+ 'Warns': [],
+ 'Other Parameters': [],
+ 'Attributes': [],
+ 'Methods': [],
+ 'See Also': [],
+ 'Notes': [],
+ 'Warnings': [],
+ 'References': '',
+ 'Examples': '',
+ 'index': {}
+ }
+
+ self._parse()
+
+ def __getitem__(self,key):
+ return self._parsed_data[key]
+
+ def __setitem__(self,key,val):
+ if not self._parsed_data.has_key(key):
+ warn("Unknown section %s" % key)
+ else:
+ self._parsed_data[key] = val
+
+ def _is_at_section(self):
+ self._doc.seek_next_non_empty_line()
+
+ if self._doc.eof():
+ return False
+
+ l1 = self._doc.peek().strip() # e.g. Parameters
+
+ if l1.startswith('.. index::'):
+ return True
+
+ l2 = self._doc.peek(1).strip() # ---------- or ==========
+ return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
+
+ def _strip(self,doc):
+ i = 0
+ j = 0
+ for i,line in enumerate(doc):
+ if line.strip(): break
+
+ for j,line in enumerate(doc[::-1]):
+ if line.strip(): break
+
+ return doc[i:len(doc)-j]
+
+ def _read_to_next_section(self):
+ section = self._doc.read_to_next_empty_line()
+
+ while not self._is_at_section() and not self._doc.eof():
+ if not self._doc.peek(-1).strip(): # previous line was empty
+ section += ['']
+
+ section += self._doc.read_to_next_empty_line()
+
+ return section
+
+ def _read_sections(self):
+ while not self._doc.eof():
+ data = self._read_to_next_section()
+ name = data[0].strip()
+
+ if name.startswith('..'): # index section
+ yield name, data[1:]
+ elif len(data) < 2:
+ yield StopIteration
+ else:
+ yield name, self._strip(data[2:])
+
+ def _parse_param_list(self,content):
+ r = Reader(content)
+ params = []
+ while not r.eof():
+ header = r.read().strip()
+ if ' : ' in header:
+ arg_name, arg_type = header.split(' : ')[:2]
+ else:
+ arg_name, arg_type = header, ''
+
+ desc = r.read_to_next_unindented_line()
+ desc = dedent_lines(desc)
+
+ params.append((arg_name,arg_type,desc))
+
+ return params
+
+
+ _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
+ r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
+ def _parse_see_also(self, content):
+ """
+ func_name : Descriptive text
+ continued text
+ another_func_name : Descriptive text
+ func_name1, func_name2, :meth:`func_name`, func_name3
+
+ """
+ items = []
+
+ def parse_item_name(text):
+ """Match ':role:`name`' or 'name'"""
+ m = self._name_rgx.match(text)
+ if m:
+ g = m.groups()
+ if g[1] is None:
+ return g[3], None
+ else:
+ return g[2], g[1]
+ raise ValueError("%s is not a item name" % text)
+
+ def push_item(name, rest):
+ if not name:
+ return
+ name, role = parse_item_name(name)
+ items.append((name, list(rest), role))
+ del rest[:]
+
+ current_func = None
+ rest = []
+
+ for line in content:
+ if not line.strip(): continue
+
+ m = self._name_rgx.match(line)
+ if m and line[m.end():].strip().startswith(':'):
+ push_item(current_func, rest)
+ current_func, line = line[:m.end()], line[m.end():]
+ rest = [line.split(':', 1)[1].strip()]
+ if not rest[0]:
+ rest = []
+ elif not line.startswith(' '):
+ push_item(current_func, rest)
+ current_func = None
+ if ',' in line:
+ for func in line.split(','):
+ push_item(func, [])
+ elif line.strip():
+ current_func = line
+ elif current_func is not None:
+ rest.append(line.strip())
+ push_item(current_func, rest)
+ return items
+
+ def _parse_index(self, section, content):
+ """
+ .. index: default
+ :refguide: something, else, and more
+
+ """
+ def strip_each_in(lst):
+ return [s.strip() for s in lst]
+
+ out = {}
+ section = section.split('::')
+ if len(section) > 1:
+ out['default'] = strip_each_in(section[1].split(','))[0]
+ for line in content:
+ line = line.split(':')
+ if len(line) > 2:
+ out[line[1]] = strip_each_in(line[2].split(','))
+ return out
+
+ def _parse_summary(self):
+ """Grab signature (if given) and summary"""
+ if self._is_at_section():
+ return
+
+ summary = self._doc.read_to_next_empty_line()
+ summary_str = " ".join([s.strip() for s in summary]).strip()
+ if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
+ self['Signature'] = summary_str
+ if not self._is_at_section():
+ self['Summary'] = self._doc.read_to_next_empty_line()
+ else:
+ self['Summary'] = summary
+
+ if not self._is_at_section():
+ self['Extended Summary'] = self._read_to_next_section()
+
+ def _parse(self):
+ self._doc.reset()
+ self._parse_summary()
+
+ for (section,content) in self._read_sections():
+ if not section.startswith('..'):
+ section = ' '.join([s.capitalize() for s in section.split(' ')])
+ if section in ('Parameters', 'Attributes', 'Methods',
+ 'Returns', 'Raises', 'Warns'):
+ self[section] = self._parse_param_list(content)
+ elif section.startswith('.. index::'):
+ self['index'] = self._parse_index(section, content)
+ elif section == 'See Also':
+ self['See Also'] = self._parse_see_also(content)
+ else:
+ self[section] = content
+
+ # string conversion routines
+
+ def _str_header(self, name, symbol='-'):
+ return [name, len(name)*symbol]
+
+ def _str_indent(self, doc, indent=4):
+ out = []
+ for line in doc:
+ out += [' '*indent + line]
+ return out
+
+ def _str_signature(self):
+ if self['Signature']:
+ return [self['Signature'].replace('*','\*')] + ['']
+ else:
+ return ['']
+
+ def _str_summary(self):
+ if self['Summary']:
+ return self['Summary'] + ['']
+ else:
+ return []
+
+ def _str_extended_summary(self):
+ if self['Extended Summary']:
+ return self['Extended Summary'] + ['']
+ else:
+ return []
+
+ def _str_param_list(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ for param,param_type,desc in self[name]:
+ out += ['%s : %s' % (param, param_type)]
+ out += self._str_indent(desc)
+ out += ['']
+ return out
+
+ def _str_section(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ out += self[name]
+ out += ['']
+ return out
+
+ def _str_see_also(self, func_role):
+ if not self['See Also']: return []
+ out = []
+ out += self._str_header("See Also")
+ last_had_desc = True
+ for func, desc, role in self['See Also']:
+ if role:
+ link = ':%s:`%s`' % (role, func)
+ elif func_role:
+ link = ':%s:`%s`' % (func_role, func)
+ else:
+ link = "`%s`_" % func
+ if desc or last_had_desc:
+ out += ['']
+ out += [link]
+ else:
+ out[-1] += ", %s" % link
+ if desc:
+ out += self._str_indent([' '.join(desc)])
+ last_had_desc = True
+ else:
+ last_had_desc = False
+ out += ['']
+ return out
+
+ def _str_index(self):
+ idx = self['index']
+ out = []
+ out += ['.. index:: %s' % idx.get('default','')]
+ for section, references in idx.iteritems():
+ if section == 'default':
+ continue
+ out += [' :%s: %s' % (section, ', '.join(references))]
+ return out
+
+ def __str__(self, func_role=''):
+ out = []
+ out += self._str_signature()
+ out += self._str_summary()
+ out += self._str_extended_summary()
+ for param_list in ('Parameters','Returns','Raises'):
+ out += self._str_param_list(param_list)
+ out += self._str_section('Warnings')
+ out += self._str_see_also(func_role)
+ for s in ('Notes','References','Examples'):
+ out += self._str_section(s)
+ out += self._str_index()
+ return '\n'.join(out)
+
+
+def indent(str,indent=4):
+ indent_str = ' '*indent
+ if str is None:
+ return indent_str
+ lines = str.split('\n')
+ return '\n'.join(indent_str + l for l in lines)
+
+def dedent_lines(lines):
+ """Deindent a list of lines maximally"""
+ return textwrap.dedent("\n".join(lines)).split("\n")
+
+def header(text, style='-'):
+ return text + '\n' + style*len(text) + '\n'
+
+
+class FunctionDoc(NumpyDocString):
+ def __init__(self, func, role='func'):
+ self._f = func
+ self._role = role # e.g. "func" or "meth"
+ try:
+ NumpyDocString.__init__(self,inspect.getdoc(func) or '')
+ except ValueError, e:
+ print '*'*78
+ print "ERROR: '%s' while parsing `%s`" % (e, self._f)
+ print '*'*78
+ #print "Docstring follows:"
+ #print doclines
+ #print '='*78
+
+ if not self['Signature']:
+ func, func_name = self.get_func()
+ try:
+ # try to read signature
+ argspec = inspect.getargspec(func)
+ argspec = inspect.formatargspec(*argspec)
+ argspec = argspec.replace('*','\*')
+ signature = '%s%s' % (func_name, argspec)
+ except TypeError, e:
+ signature = '%s()' % func_name
+ self['Signature'] = signature
+
+ def get_func(self):
+ func_name = getattr(self._f, '__name__', self.__class__.__name__)
+ if inspect.isclass(self._f):
+ func = getattr(self._f, '__call__', self._f.__init__)
+ else:
+ func = self._f
+ return func, func_name
+
+ def __str__(self):
+ out = ''
+
+ func, func_name = self.get_func()
+ signature = self['Signature'].replace('*', '\*')
+
+ roles = {'func': 'function',
+ 'meth': 'method'}
+
+ if self._role:
+ if not roles.has_key(self._role):
+ print "Warning: invalid role %s" % self._role
+ out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''),
+ func_name)
+
+ out += super(FunctionDoc, self).__str__(func_role=self._role)
+ return out
+
+
+class ClassDoc(NumpyDocString):
+ def __init__(self,cls,modulename='',func_doc=FunctionDoc):
+ if not inspect.isclass(cls):
+ raise ValueError("Initialise using a class. Got %r" % cls)
+ self._cls = cls
+
+ if modulename and not modulename.endswith('.'):
+ modulename += '.'
+ self._mod = modulename
+ self._name = cls.__name__
+ self._func_doc = func_doc
+
+ NumpyDocString.__init__(self, pydoc.getdoc(cls))
+
+ @property
+ def methods(self):
+ return [name for name,func in inspect.getmembers(self._cls)
+ if not name.startswith('_') and callable(func)]
+
+ def __str__(self):
+ out = ''
+ out += super(ClassDoc, self).__str__()
+ out += "\n\n"
+
+ #for m in self.methods:
+ # print "Parsing `%s`" % m
+ # out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n'
+ # out += '.. index::\n single: %s; %s\n\n' % (self._name, m)
+
+ return out
+
+
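The string-assembly methods above are easiest to see end to end with a small example. The sketch below (Python 2, like the modules in this commit) assumes doc/sphinxext is on sys.path; the `gaussian` function and its docstring are invented for illustration.

    import math
    from docscrape import FunctionDoc

    def gaussian(x, mu=0.0, sigma=1.0):
        """
        Evaluate a Gaussian density.

        Parameters
        ----------
        x : float
            Point at which to evaluate.
        mu : float, optional
            Mean of the distribution.
        sigma : float, optional
            Standard deviation.

        Returns
        -------
        y : float
            Value of the density at `x`.
        """
        return math.exp(-(x - mu)**2 / (2.0*sigma**2)) / (sigma*math.sqrt(2*math.pi))

    # Parse the docstring and re-emit it as reStructuredText.
    doc = FunctionDoc(gaussian)
    print(str(doc))           # assembled by NumpyDocString.__str__ above
    print(doc['Parameters'])  # parsed (name, type, description) triples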
133 doc/sphinxext/docscrape_sphinx.py
View
@@ -0,0 +1,133 @@
+import re, inspect, textwrap, pydoc
+from docscrape import NumpyDocString, FunctionDoc, ClassDoc
+
+class SphinxDocString(NumpyDocString):
+ # string conversion routines
+ def _str_header(self, name, symbol='`'):
+ return ['.. rubric:: ' + name, '']
+
+ def _str_field_list(self, name):
+ return [':' + name + ':']
+
+ def _str_indent(self, doc, indent=4):
+ out = []
+ for line in doc:
+ out += [' '*indent + line]
+ return out
+
+ def _str_signature(self):
+ # The signature is already emitted by the autodoc directive itself
+ # (see mangle_signature in numpydoc.py), so this deliberately returns
+ # nothing; the code below is left in place but unreachable.
+ return ['']
+ if self['Signature']:
+ return ['``%s``' % self['Signature']] + ['']
+ else:
+ return ['']
+
+ def _str_summary(self):
+ return self['Summary'] + ['']
+
+ def _str_extended_summary(self):
+ return self['Extended Summary'] + ['']
+
+ def _str_param_list(self, name):
+ out = []
+ if self[name]:
+ out += self._str_field_list(name)
+ out += ['']
+ for param,param_type,desc in self[name]:
+ out += self._str_indent(['**%s** : %s' % (param.strip(),
+ param_type)])
+ out += ['']
+ out += self._str_indent(desc,8)
+ out += ['']
+ return out
+
+ def _str_section(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ out += ['']
+ content = textwrap.dedent("\n".join(self[name])).split("\n")
+ out += content
+ out += ['']
+ return out
+
+ def _str_see_also(self, func_role):
+ out = []
+ if self['See Also']:
+ see_also = super(SphinxDocString, self)._str_see_also(func_role)
+ out = ['.. seealso::', '']
+ out += self._str_indent(see_also[2:])
+ return out
+
+ def _str_warnings(self):
+ out = []
+ if self['Warnings']:
+ out = ['.. warning::', '']
+ out += self._str_indent(self['Warnings'])
+ return out
+
+ def _str_index(self):
+ idx = self['index']
+ out = []
+ if len(idx) == 0:
+ return out
+
+ out += ['.. index:: %s' % idx.get('default','')]
+ for section, references in idx.iteritems():
+ if section == 'default':
+ continue
+ elif section == 'refguide':
+ out += [' single: %s' % (', '.join(references))]
+ else:
+ out += [' %s: %s' % (section, ','.join(references))]
+ return out
+
+ def _str_references(self):
+ out = []
+ if self['References']:
+ out += self._str_header('References')
+ if isinstance(self['References'], str):
+ self['References'] = [self['References']]
+ out.extend(self['References'])
+ out += ['']
+ return out
+
+ def __str__(self, indent=0, func_role="obj"):
+ out = []
+ out += self._str_signature()
+ out += self._str_index() + ['']
+ out += self._str_summary()
+ out += self._str_extended_summary()
+ for param_list in ('Parameters', 'Attributes', 'Methods',
+ 'Returns','Raises'):
+ out += self._str_param_list(param_list)
+ out += self._str_warnings()
+ out += self._str_see_also(func_role)
+ out += self._str_section('Notes')
+ out += self._str_references()
+ out += self._str_section('Examples')
+ out = self._str_indent(out,indent)
+ return '\n'.join(out)
+
+class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
+ pass
+
+class SphinxClassDoc(SphinxDocString, ClassDoc):
+ pass
+
+def get_doc_object(obj, what=None):
+ if what is None:
+ if inspect.isclass(obj):
+ what = 'class'
+ elif inspect.ismodule(obj):
+ what = 'module'
+ elif callable(obj):
+ what = 'function'
+ else:
+ what = 'object'
+ if what == 'class':
+ return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc)
+ elif what in ('function', 'method'):
+ return SphinxFunctionDoc(obj, '')
+ else:
+ return SphinxDocString(pydoc.getdoc(obj))
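get_doc_object is the entry point numpydoc.py (next file) calls for every documented object. A minimal sketch of using it directly, under the same assumptions as the docscrape example above:

    from docscrape_sphinx import get_doc_object

    def clip(a, lo, hi):
        """
        Limit a value to a range.

        Parameters
        ----------
        a : float
            Value to clip.
        lo, hi : float
            Lower and upper bounds.
        """
        return max(lo, min(a, hi))

    doc = get_doc_object(clip)  # callable, so a SphinxFunctionDoc is returned
    print(str(doc))             # field lists, .. rubric:: sections, index entries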
111 doc/sphinxext/numpydoc.py
View
@@ -0,0 +1,111 @@
+"""
+========
+numpydoc
+========
+
+Sphinx extension that handles docstrings in the Numpy standard format. [1]
+
+It will:
+
+- Convert Parameters etc. sections to field lists.
+- Convert See Also section to a See also entry.
+- Renumber references.
+- Extract the signature from the docstring, if it can't be determined otherwise.
+
+.. [1] http://projects.scipy.org/scipy/numpy/wiki/CodingStyleGuidelines#docstring-standard
+
+"""
+
+import os, re, pydoc
+from docscrape_sphinx import get_doc_object, SphinxDocString
+import inspect
+
+def mangle_docstrings(app, what, name, obj, options, lines,
+ reference_offset=[0]):
+ # NB: the mutable default argument is intentional; it acts as a running
+ # counter so renumbered references stay unique across the whole build.
+ if what == 'module':
+ # Strip top title
+ title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
+ re.I|re.S)
+ lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
+ else:
+ doc = get_doc_object(obj, what)
+ lines[:] = str(doc).split("\n")
+
+ if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
+ obj.__name__:
+ v = dict(full_name=obj.__name__)
+ lines += [''] + (app.config.numpydoc_edit_link % v).split("\n")
+
+ # replace reference numbers so that there are no duplicates
+ references = []
+ for l in lines:
+ l = l.strip()
+ if l.startswith('.. ['):
+ try:
+ references.append(int(l[len('.. ['):l.index(']')]))
+ except ValueError:
+ print "WARNING: invalid reference in %s docstring" % name
+
+ # Start renaming from the biggest number, otherwise we may
+ # overwrite references.
+ references.sort(reverse=True)
+ if references:
+ for i, line in enumerate(lines):
+ for r in references:
+ new_r = reference_offset[0] + r
+ lines[i] = lines[i].replace('[%d]_' % r,
+ '[%d]_' % new_r)
+ lines[i] = lines[i].replace('.. [%d]' % r,
+ '.. [%d]' % new_r)
+
+ reference_offset[0] += len(references)
+
+def mangle_signature(app, what, name, obj, options, sig, retann):
+ # Do not try to inspect classes that don't define their own `__init__`:
+ # they inherit object.__init__, whose default docstring contains the
+ # phrase "initializes x; see ..." matched below.
+ if (inspect.isclass(obj) and
+ 'initializes x; see ' in pydoc.getdoc(obj.__init__)):
+ return '', ''
+
+ if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
+ if not hasattr(obj, '__doc__'): return
+
+ doc = SphinxDocString(pydoc.getdoc(obj))
+ if doc['Signature']:
+ sig = re.sub("^[^(]*", "", doc['Signature'])
+ return sig, ''
+
+def initialize(app):
+ try:
+ app.connect('autodoc-process-signature', mangle_signature)
+ except:
+ # Sphinx < 0.5 has no autodoc-process-signature event; fall back to
+ # patching sphinx.ext.autodoc directly (see below).
+ monkeypatch_sphinx_ext_autodoc()
+
+def setup(app, get_doc_object_=get_doc_object):
+ global get_doc_object
+ get_doc_object = get_doc_object_
+
+ app.connect('autodoc-process-docstring', mangle_docstrings)
+ app.connect('builder-inited', initialize)
+ app.add_config_value('numpydoc_edit_link', None, True)
+
+#------------------------------------------------------------------------------
+# Monkeypatch sphinx.ext.autodoc to accept argspecless autodocs (Sphinx < 0.5)
+#------------------------------------------------------------------------------
+
+def monkeypatch_sphinx_ext_autodoc():
+ global _original_format_signature
+ import sphinx.ext.autodoc
+
+ if sphinx.ext.autodoc.format_signature is our_format_signature:
+ return
+
+ print "[numpydoc] Monkeypatching sphinx.ext.autodoc ..."
+ _original_format_signature = sphinx.ext.autodoc.format_signature
+ sphinx.ext.autodoc.format_signature = our_format_signature
+
+def our_format_signature(what, obj):
+ r = mangle_signature(None, what, None, obj, None, None, None)
+ if r is not None:
+ return r[0]
+ else:
+ return _original_format_signature(what, obj)
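For context, once doc/sphinxext is importable from conf.py, enabling the extension is a one-line addition to the extensions list. A hypothetical sketch; the edit-link template and URL are illustrative values only, not part of this commit:

    # conf.py sketch (hypothetical values)
    extensions = ['sphinx.ext.autodoc', 'numpydoc']

    # Optional: numpydoc_edit_link is %-formatted with `full_name` for each
    # documented object and appended to its mangled docstring.
    numpydoc_edit_link = ('`Edit %(full_name)s on the wiki '
                          '<http://example.invalid/%(full_name)s>`__')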
87 doc/sphinxext/only_directives.py
View
@@ -0,0 +1,87 @@
+#
+# A pair of directives for inserting content that will only appear in
+# either html or latex.
+#
+
+from docutils.nodes import Body, Element
+from docutils.writers.html4css1 import HTMLTranslator
+from sphinx.latexwriter import LaTeXTranslator
+from docutils.parsers.rst import directives
+
+class html_only(Body, Element):
+ pass
+
+class latex_only(Body, Element):
+ pass
+
+def run(content, node_class, state, content_offset):
+ text = '\n'.join(content)
+ node = node_class(text)
+ state.nested_parse(content, content_offset, node)
+ return [node]
+
+try:
+ from docutils.parsers.rst import Directive
+except ImportError:
+ from docutils.parsers.rst.directives import _directives
+
+ def html_only_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ return run(content, html_only, state, content_offset)
+
+ def latex_only_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ return run(content, latex_only, state, content_offset)
+
+ for func in (html_only_directive, latex_only_directive):
+ func.content = 1
+ func.options = {}
+ func.arguments = None
+
+ _directives['htmlonly'] = html_only_directive
+ _directives['latexonly'] = latex_only_directive
+else:
+ class OnlyDirective(Directive):
+ has_content = True
+ required_arguments = 0
+ optional_arguments = 0
+ final_argument_whitespace = True
+ option_spec = {}
+
+ def run(self):
+ self.assert_has_content()
+ return run(self.content, self.node_class,
+ self.state, self.content_offset)
+
+ class HtmlOnlyDirective(OnlyDirective):
+ node_class = html_only
+
+ class LatexOnlyDirective(OnlyDirective):
+ node_class = latex_only
+
+ directives.register_directive('htmlonly', HtmlOnlyDirective)
+ directives.register_directive('latexonly', LatexOnlyDirective)
+
+def setup(app):
+ app.add_node(html_only)
+ app.add_node(latex_only)
+
+ # Add visit/depart methods to HTML-Translator:
+ def visit_perform(self, node):
+ pass
+ def depart_perform(self, node):
+ pass
+ def visit_ignore(self, node):
+ node.children = []
+ def depart_ignore(self, node):
+ node.children = []
+
+ HTMLTranslator.visit_html_only = visit_perform
+ HTMLTranslator.depart_html_only = depart_perform
+ HTMLTranslator.visit_latex_only = visit_ignore
+ HTMLTranslator.depart_latex_only = depart_ignore
+
+ LaTeXTranslator.visit_html_only = visit_ignore
+ LaTeXTranslator.depart_html_only = depart_ignore
+ LaTeXTranslator.visit_latex_only = visit_perform
+ LaTeXTranslator.depart_latex_only = depart_perform
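A sketch of how the two directives would typically be enabled and used; the extension name matches the new module, while the reST usage shown in the comments is an assumed example rather than part of this diff:

    # conf.py: register the html-/latex-only directives alongside numpydoc.
    extensions = ['sphinx.ext.autodoc', 'numpydoc', 'only_directives']

    # In a .rst source one would then write, for example:
    #
    #   .. htmlonly::
    #
    #      This paragraph is emitted only by the HTML builder.
    #
    #   .. latexonly::
    #
    #      This paragraph is emitted only by the LaTeX builder.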
162 doc/sphinxext/phantom_import.py
View
@@ -0,0 +1,162 @@
+"""
+==============
+phantom_import
+==============
+
+Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar
+extensions use docstrings loaded from an XML file.
+
+This extension loads an XML file in the Pydocweb format [1] and
+creates a dummy module that contains the specified docstrings. This
+can be used to get the current docstrings from a Pydocweb instance
+without needing to rebuild the documented module.
+
+.. [1] http://code.google.com/p/pydocweb
+
+"""
+import imp, sys, compiler, types, os, inspect, re
+
+def setup(app):
+ app.connect('builder-inited', initialize)
+ app.add_config_value('phantom_import_file', None, True)
+
+def initialize(app):
+ fn = app.config.phantom_import_file
+ if (fn and os.path.isfile(fn)):
+ print "[numpydoc] Phantom importing modules from", fn, "..."
+ import_phantom_module(fn)
+
+#------------------------------------------------------------------------------
+# Creating 'phantom' modules from an XML description
+#------------------------------------------------------------------------------
+def import_phantom_module(xml_file):
+ """
+ Insert a fake Python module into sys.modules, based on an XML file.
+
+ The XML file is expected to conform to the Pydocweb DTD. The fake
+ module will contain dummy objects, which guarantee the following:
+
+ - Docstrings are correct.
+ - Class inheritance relationships are correct (if present in XML).
+ - Function argspec is *NOT* correct (even if present in XML).
+ Instead, the function signature is prepended to the function docstring.
+ - Class attributes are *NOT* correct; instead, they are dummy objects.
+
+ Parameters
+ ----------
+ xml_file : str
+ Name of an XML file to read
+
+ """
+ import lxml.etree as etree