Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP

Comparing changes

Choose two branches to see what's changed or to start a new pull request. If you need to, you can also compare across forks.

Open a pull request

Create a new pull request by comparing changes across two branches. If you need to, you can also compare across forks.
base fork: translate/translate
...
head fork: translate/translate
Checking mergeability… Don't worry, you can still create the pull request.
  • 4 commits
  • 106 files changed
  • 0 commit comments
  • 1 contributor
Commits on Mar 28, 2012
@julen julen s/doc/docs/ and remove references to SVN. e040170
@julen julen Farewell, epydoc. 6502945
@julen julen Ignore Sphinx's build directory. e48c3f9
@julen julen Convert docstrings from epytext to reST.
This will allow Sphinx to properly recognize the formatting. There might
be small bugs, not difficult to catch and fix them once the docs are built.
4ad12b3
Showing with 761 additions and 1,004 deletions.
  1. +3 −0  .gitignore
  2. +0 −18 docs/README
  3. +0 −14 docs/api/index.html
  4. +0 −111 docs/epydoc-config.ini
  5. +0 −31 docs/gen_api_docs.sh
  6. +0 −34 docs/index.html
  7. +0 −14 docs/user/toolkit-index.html
  8. +4 −5 setup.py
  9. +0 −5 translate/convert/__init__.py
  10. +10 −10 translate/convert/accesskey.py
  11. +6 −6 translate/convert/factory.py
  12. +1 −1  translate/convert/ical2po.py
  13. +1 −1  translate/convert/ini2po.py
  14. +3 −3 translate/convert/json2po.py
  15. +4 −4 translate/convert/po2tiki.py
  16. +1 −1  translate/convert/sub2po.py
  17. +6 −6 translate/convert/tiki2po.py
  18. +7 −7 translate/filters/checks.py
  19. +6 −6 translate/filters/decoration.py
  20. +1 −3 translate/lang/__init__.py
  21. +1 −1  translate/lang/af.py
  22. +1 −1  translate/lang/am.py
  23. +1 −1  translate/lang/ar.py
  24. +1 −1  translate/lang/bn.py
  25. +1 −1  translate/lang/code_or.py
  26. +6 −3 translate/lang/common.py
  27. +12 −12 translate/lang/data.py
  28. +1 −1  translate/lang/de.py
  29. +1 −1  translate/lang/el.py
  30. +1 −1  translate/lang/fa.py
  31. +1 −1  translate/lang/factory.py
  32. +1 −1  translate/lang/fr.py
  33. +1 −1  translate/lang/gu.py
  34. +1 −1  translate/lang/he.py
  35. +1 −1  translate/lang/hi.py
  36. +1 −1  translate/lang/hy.py
  37. +9 −9 translate/lang/identify.py
  38. +1 −1  translate/lang/ja.py
  39. +1 −1  translate/lang/km.py
  40. +1 −1  translate/lang/kn.py
  41. +1 −1  translate/lang/ko.py
  42. +1 −1  translate/lang/ml.py
  43. +1 −1  translate/lang/mr.py
  44. +1 −1  translate/lang/ne.py
  45. +1 −1  translate/lang/pa.py
  46. +9 −9 translate/lang/poedit.py
  47. +1 −1  translate/lang/si.py
  48. +1 −1  translate/lang/sv.py
  49. +1 −1  translate/lang/ta.py
  50. +1 −1  translate/lang/te.py
  51. +8 −8 translate/lang/team.py
  52. +1 −1  translate/lang/th.py
  53. +1 −1  translate/lang/ug.py
  54. +1 −1  translate/lang/ur.py
  55. +1 −1  translate/lang/vi.py
  56. +1 −1  translate/lang/zh.py
  57. +2 −2 translate/misc/file_discovery.py
  58. +10 −10 translate/misc/optrecurse.py
  59. +1 −1  translate/misc/test_optrecurse.py
  60. +100 −100 translate/search/indexing/CommonIndexer.py
  61. +78 −78 translate/search/indexing/PyLuceneIndexer.py
  62. +52 −52 translate/search/indexing/PyLuceneIndexer1.py
  63. +82 −82 translate/search/indexing/XapianIndexer.py
  64. +10 −10 translate/search/indexing/__init__.py
  65. +6 −6 translate/search/indexing/test_indexers.py
  66. +7 −7 translate/search/match.py
  67. +1 −1  translate/search/terminology.py
  68. +0 −9 translate/storage/__init__.py
  69. +49 −55 translate/storage/base.py
  70. +10 −10 translate/storage/bundleprojstore.py
  71. +8 −8 translate/storage/catkeys.py
  72. +4 −4 translate/storage/cpo.py
  73. +12 −12 translate/storage/dtd.py
  74. +4 −4 translate/storage/factory.py
  75. +4 −4 translate/storage/fpo.py
  76. +1 −1  translate/storage/html.py
  77. +3 −3 translate/storage/ical.py
  78. +1 −1  translate/storage/ini.py
  79. +6 −6 translate/storage/omegat.py
  80. +7 −7 translate/storage/oo.py
  81. +5 −5 translate/storage/php.py
  82. +1 −1  translate/storage/pocommon.py
  83. +10 −10 translate/storage/poheader.py
  84. +15 −15 translate/storage/project.py
  85. +19 −18 translate/storage/projstore.py
  86. +22 −22 translate/storage/properties.py
  87. +8 −8 translate/storage/pypo.py
  88. +8 −8 translate/storage/qm.py
  89. +8 −8 translate/storage/qph.py
  90. +1 −1  translate/storage/rc.py
  91. +2 −5 translate/storage/statistics.py
  92. +0 −3  translate/storage/statsdb.py
  93. +3 −2 translate/storage/subtitles.py
  94. +1 −1  translate/storage/test_php.py
  95. +4 −4 translate/storage/tiki.py
  96. +5 −5 translate/storage/trados.py
  97. +4 −4 translate/storage/ts.py
  98. +13 −13 translate/storage/ts2.py
  99. +5 −5 translate/storage/utx.py
  100. +13 −13 translate/storage/wordfast.py
  101. +6 −6 translate/storage/workflow.py
  102. +6 −6 translate/storage/xliff.py
  103. +3 −3 translate/tools/phppo2pypo.py
  104. +7 −7 translate/tools/pocount.py
  105. +3 −3 translate/tools/pomerge.py
  106. +3 −3 translate/tools/pypo2phppo.py
View
3  .gitignore
@@ -1,5 +1,8 @@
# /
/LICENSE
+# /docs/
+/docs/_build/
+
# General
*.pyc
View
18 docs/README
@@ -1,18 +0,0 @@
-User documentation
-==================
-The files under the user/ directory is user documentation that is generated
-from the translate toolkit section of the Translate wiki at
- http://translate.sourceforge.net/wiki/toolkit/index
-
-The online versions should always be more up to date, but might already reflect
-changes in versions newer than the one you downloaded. Please contribute to the
-wiki rather than working on these files here.
-
-API documenation
-================
-The files under the api/ directory is API documentation for programmers that
-want to use the toolkit as a library. It is generated with epydoc as part of
-the release process. The files packaged with a released version corresponds to
-that particular release. You might want to make sure where current developement
-is heading and rather work against the current development trunk in version
-control.
View
14 docs/api/index.html
@@ -1,14 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2//EN">
-<html>
-<head>
-<meta name="generator" content=
-"HTML Tidy for Linux/x86 (vers 1 September 2005), see www.w3.org">
-<title></title>
-</head>
-<body>
-The API documentation will be put here as part of the release
-process. You can view the online documentation here:
-<a href="http://translate.sourceforge.net/doc/api/">
-http://translate.sourceforge.net/doc/api/</a>
-</body>
-</html>
View
111 docs/epydoc-config.ini
@@ -1,111 +0,0 @@
-[epydoc] # Epydoc section marker (required by ConfigParser)
-
-# modules
-# The list of objects to document. Objects can be named using
-# dotted names, module filenames, or package directory names.
-# Alases for this option include "objects" and "values".
-modules: translate
-
-# output
-# The type of output that should be generated. Should be one
-# of: html, text, latex, dvi, ps, pdf.
-output: html
-
-# target
-# The path to the output directory. May be relative or absolute.
-#target: apidocs/
-
-# docformat
-# The default markup language for docstrings, for modules that do
-# not define __docformat__. Defaults to epytext.
-docformat: epytext
-
-# css
-# The CSS stylesheet for HTML output. Can be the name of a builtin
-# stylesheet, or the name of a file.
-css: white
-
-# name
-# The documented project's name.
-name: Translate Toolkit
-
-# url
-# The documented project's URL.
-url: http://translate.sourceforge.net/wiki/toolkit/index
-
-# link
-# HTML code for the project link in the navigation bar. If left
-# unspecified, the project link will be generated based on the
-# project's name and URL.
-# link: <a href="somewhere">My Cool Project</a>
-
-# top
-# The "top" page for the documentation. Can be a URL, the name
-# of a module or class, or one of the special names "trees.html",
-# "indices.html", or "help.html"
-# top: translate.storage
-
-# help
-# An alternative help file. The named file should contain the
-# body of an HTML file; navigation bars will be added to it.
-# help: my_helpfile.html
-
-# frames
-# Whether or not to include a frames-based table of contents.
-frames: yes
-
-# private
-# Whether or not to inclue private variables. (Even if included,
-# private variables will be hidden by default.)
-private: yes
-
-# imports
-# Whether or not to list each module's imports.
-imports: yes
-
-# verbosity
-# An integer indicating how verbose epydoc should be. The default
-# value is 0; negative values will supress warnings and errors;
-# positive values will give more verbose output.
-verbosity: 1
-
-# parse
-# Whether or not parsing should be used to examine objects.
-parse: yes
-
-# introspect
-# Whether or not introspection should be used to examine objects.
-introspect: yes
-
-# graph
-# The list of graph types that should be automatically included
-# in the output. Graphs are generated using the Graphviz "dot"
-# executable. Graph types include: "classtree", "callgraph",
-# "umlclass". Use "all" to include all graph types
-graph: all
-
-# dotpath
-# The path to the Graphviz "dot" executable, used to generate
-# graphs.
-# dotpath: /usr/bin/
-
-# sourcecode
-# Whether or not to include syntax highlighted source code in
-# the output (HTML only).
-sourcecode: yes
-
-# pstat
-# The name of one or more pstat files (generated by the profile
-# or hotshot module). These are used to generate call graphs.
-# pstat: profile.out
-
-# separate-classes
-# Whether each class should be listed in its own section when
-# generating LaTeX or PDF output.
-separate-classes: no
-
-# The format for showing inheritance objects, should be one
-# of: grouped, listed, included.
-inheritance: listed
-exclude: test_*
-
View
31 docs/gen_api_docs.sh
@@ -1,31 +0,0 @@
-#!/bin/sh
-
-# The translate toolkit must be in your PYTHONPATH when you
-# build these documents. Either install them or run:
-# . setpath
-#
-# The script will then find them, build docs and export them
-# to sourceforge.
-#
-# You should also have a setup in .ssh/config that defines
-# $sfaccount with your sourceforge shell login details for
-# the translate project.
-#
-# EPYDOC
-# ======
-# See: http://epydoc.sourceforge.net/manual-epytext.html
-# and: http://epydoc.sourceforge.net/fields.html#fields
-
-docdir=`dirname $0`
-outputdir=$docdir/api/
-
-rm -rf $outputdir
-epydoc --config=$docdir/epydoc-config.ini --output=$outputdir
-
-
-##To get the new documentation on SourceForge,
-##create a new shell account and update the API docs
-
-sfaccount=sftranslate-shell
-#ssh $sfaccount create
-#rsync -azv -e ssh --delete $outputdir $sfaccount:translate/htdocs/doc/api
View
34 docs/index.html
@@ -1,34 +0,0 @@
-<html>
- <head>
- <title>Translate Toolkit - documentation</title>
- </head>
- <body>
- <p>
- The Translate Toolkit is a set of tools designed to work with XLIFF and
- Gettext PO files, and can manipulate several other formats.
- </p>
- <p>
- The Toolkit provides a platform for developing localisation tools and
- is used as such a platform by the following projects:
- <ul>
- <li><a href="http://translate.sourceforge.net/wiki/virtaal/index">Virtaal</a></li>
- <li><a href="http://translate.sourceforge.net/wiki/pootle/index">Pootle</a></li>
- <li><a href="http://open-tran.eu/">Open-Tran.eu</a> - providing translation memory lookup</li>
- </ul>
- The Translate Toolkit is hosted at
- <a href="http://translate.sourceforge.net/">translate.sourceforge.net</a>.
- </p>
-
- <p>
- The documentation packaged with the Translate Toolkit have two parts:
- </p>
- <ul>
- <li><a href="user/toolkit-index.html">User documentation</a></li>
- <li><a href="api/index.html">API documentation</a> (for programmers)</li>
- </ul>
-
- The most up-to-date documentation about the Translate Toolkit is
- available online in the <a href="http://translate.sourceforge.net/wiki/">
- Translate wiki</a>. Please contribute any improvements there.
- </body>
-</html>
View
14 docs/user/toolkit-index.html
@@ -1,14 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2//EN">
-<html>
-<head>
-<meta name="generator" content=
-"HTML Tidy for Linux/x86 (vers 1 September 2005), see www.w3.org">
-<title></title>
-</head>
-<body>
-The user documentation will be put here as part of the release
-process. You can view the online user documentation here:
-<a href="http://translate.sourceforge.net/wiki/toolkit/index">
-http://translate.sourceforge.net/wiki/toolkit/index</a>
-</body>
-</html>
View
9 setup.py
@@ -180,7 +180,7 @@ def create(self, pathname=None):
print >> ofi, r'Source: "%s"; DestDir: "{app}\%s"; Flags: ignoreversion' % (path, os.path.dirname(path))
print >> ofi
print >> ofi, r"[Icons]"
- print >> ofi, r'Name: "{group}\Documentation"; Filename: "{app}\doc\index.html";'
+ print >> ofi, r'Name: "{group}\Documentation"; Filename: "{app}\docs\index.html";'
print >> ofi, r'Name: "{group}\Translate Toolkit Command Prompt"; Filename: "cmd.exe"'
print >> ofi, r'Name: "{group}\Uninstall %s"; Filename: "{uninstallexe}"' % self.name
print >> ofi
@@ -265,11 +265,10 @@ def getdatafiles():
def listfiles(srcdir):
return join(sitepackages, srcdir), [join(srcdir, f) for f in os.listdir(srcdir) if os.path.isfile(join(srcdir, f))]
docfiles = []
- for subdir in ['doc', 'share']:
+ for subdir in ['docs', 'share']:
docwalk=os.walk(os.path.join('translate', subdir))
for docs in docwalk:
- if not '.svn' in docs[0]:
- docfiles.append(listfiles(docs[0]))
+ docfiles.append(listfiles(docs[0]))
datafiles += docfiles
return datafiles
@@ -305,7 +304,7 @@ def buildmanifest_in(file, scripts):
for scriptname in scripts:
print >>file, "include %s" % scriptname
print >> file, "# include our documentation"
- print >> file, "graft translate/doc"
+ print >> file, "graft translate/docs"
print >> file, "graft translate/share"
# wordlist, portal are in the source tree but unconnected to the python code
print >>file, "prune wordlist"
View
5 translate/convert/__init__.py
@@ -21,9 +21,4 @@
"""translate.convert is part of the translate package
It contains code to convert between different storage formats for localizations
-@group XLIFF: *xliff*
-@group Bilingual: pot2po po2tmx oo2po po2oo csv2tbx *wordfast* *ts*
-@group Monolingual: *prop* *dtd* csv2po po2csv *html* *ical* *ini* *rc* *txt* moz2po po2moz *php* *sub* *symb* *monopo* *tiki* *web2py* *lang* skype
-@group Support: accesskey convert
-@group Other: poreplace
"""
View
20 translate/convert/accesskey.py
@@ -31,10 +31,10 @@ def extract(string, accesskey_marker=DEFAULT_ACCESSKEY_MARKER):
The function will also try to ignore &entities; which would obviously not
contain accesskeys.
- @type string: Unicode
- @param string: A string that might contain a label with accesskey marker
- @type accesskey_marker: Char
- @param accesskey_marker: The character that is used to prefix an access key
+ :type string: Unicode
+ :param string: A string that might contain a label with accesskey marker
+ :type accesskey_marker: Char
+ :param accesskey_marker: The character that is used to prefix an access key
"""
assert isinstance(string, unicode)
assert isinstance(accesskey_marker, unicode)
@@ -65,12 +65,12 @@ def combine(label, accesskey,
We place an accesskey marker before the accesskey in the label and this creates a
string with the two combined e.g. "File" + "F" = "&File"
- @type label: unicode
- @param label: a label
- @type accesskey: unicode char
- @param accesskey: The accesskey
- @rtype: unicode or None
- @return: label+accesskey string or None if uncombineable
+ :type label: unicode
+ :param label: a label
+ :type accesskey: unicode char
+ :param accesskey: The accesskey
+ :rtype: unicode or None
+ :return: label+accesskey string or None if uncombineable
"""
assert isinstance(label, unicode)
assert isinstance(accesskey, unicode)
View
12 translate/convert/factory.py
@@ -125,12 +125,12 @@ def convert(inputfile, template=None, options=None, convert_options=None):
that can handle the input file (and the format/extension it gives as
output) is used.
- @type inputfile: file
- @param inputfile: The input file to be converted
- @type template: file
- @param template: Template file to use during conversion
- @type options: dict (default: None)
- @param options: Valid options are:
+ :type inputfile: file
+ :param inputfile: The input file to be converted
+ :type template: file
+ :param template: Template file to use during conversion
+ :type options: dict (default: None)
+ :param options: Valid options are:
- in_ext: The extension (format) of the input file.
- out_ext: The extension (format) to use for the output file.
- templ_ext: The extension (format) of the template file.
View
2  translate/convert/ical2po.py
@@ -82,7 +82,7 @@ def convert_unit(self, input_unit, commenttype):
def convertical(input_file, output_file, template_file, pot=False, duplicatestyle="msgctxt"):
- """Reads in L{input_file} using iCal, converts using L{ical2po}, writes to L{output_file}"""
+ """Reads in :ref:`input_file` using iCal, converts using :ref:`ical2po`, writes to :ref:`output_file`"""
input_store = ical.icalfile(input_file)
convertor = ical2po()
if template_file is None:
View
2  translate/convert/ini2po.py
@@ -81,7 +81,7 @@ def convert_unit(self, input_unit, commenttype):
def convertini(input_file, output_file, template_file, pot=False, duplicatestyle="msgctxt", dialect="default"):
- """Reads in L{input_file} using ini, converts using L{ini2po}, writes to L{output_file}"""
+ """Reads in :ref:`input_file` using ini, converts using :ref:`ini2po`, writes to :ref:`output_file`"""
from translate.storage import ini
input_store = ini.inifile(input_file, dialect=dialect)
convertor = ini2po()
View
6 translate/convert/json2po.py
@@ -75,7 +75,7 @@ def merge_store(self, template_store, input_store, blankmsgstr=False,
def convert_unit(self, input_unit, commenttype):
"""Converts a JSON unit to a PO unit
- @return: None if empty or not for translation
+ :return: None if empty or not for translation
"""
if input_unit is None:
return None
@@ -89,8 +89,8 @@ def convert_unit(self, input_unit, commenttype):
def convertjson(input_file, output_file, template_file, pot=False,
duplicatestyle="msgctxt", dialect="default", filter=None):
- """Reads in L{input_file} using jsonl10n, converts using L{json2po},
- writes to L{output_file}"""
+ """Reads in :ref:`input_file` using jsonl10n, converts using :ref:`json2po`,
+ writes to :ref:`output_file`"""
from translate.storage import jsonl10n
if filter is not None:
filter = filter.split(',')
View
8 translate/convert/po2tiki.py
@@ -31,7 +31,7 @@ class po2tiki:
def convertstore(self, thepofile):
"""Converts a given (parsed) po file to a tiki file.
- @param thepofile: a pofile pre-loaded with input data
+ :param thepofile: a pofile pre-loaded with input data
"""
thetargetfile = tiki.TikiStore()
for unit in thepofile.units:
@@ -55,9 +55,9 @@ def convertstore(self, thepofile):
def convertpo(inputfile, outputfile, template=None):
"""Converts from po file format to tiki.
- @param inputfile: file handle of the source
- @param outputfile: file handle to write to
- @param template: unused
+ :param inputfile: file handle of the source
+ :param outputfile: file handle to write to
+ :param template: unused
"""
inputstore = po.pofile(inputfile)
if inputstore.isempty():
View
2  translate/convert/sub2po.py
@@ -81,7 +81,7 @@ def convert_unit(input_unit, commenttype):
def convertsub(input_file, output_file, template_file=None, pot=False, duplicatestyle="msgctxt"):
- """Reads in L{input_file} using translate.subtitles, converts using L{sub2po}, writes to L{output_file}"""
+ """Reads in :ref:`input_file` using translate.subtitles, converts using :ref:`sub2po`, writes to :ref:`output_file`"""
from translate.storage import subtitles
input_store = subtitles.SubtitleFile(input_file)
if template_file is None:
View
12 translate/convert/tiki2po.py
@@ -30,14 +30,14 @@ class tiki2po:
def __init__(self, includeunused=False):
"""
- @param includeunused: On conversion, should the "unused" section be preserved? Default: False
+ :param includeunused: On conversion, should the "unused" section be preserved? Default: False
"""
self.includeunused = includeunused
def convertstore(self, thetikifile):
"""Converts a given (parsed) tiki file to a po file.
- @param thetikifile: a tikifile pre-loaded with input data
+ :param thetikifile: a tikifile pre-loaded with input data
"""
thetargetfile = po.pofile()
@@ -61,10 +61,10 @@ def convertstore(self, thetikifile):
def converttiki(inputfile, outputfile, template=None, includeunused=False):
"""Converts from tiki file format to po.
- @param inputfile: file handle of the source
- @param outputfile: file handle to write to
- @param template: unused
- @param includeunused: Include the "usused" section of the tiki file? Default: False
+ :param inputfile: file handle of the source
+ :param outputfile: file handle to write to
+ :param template: unused
+ :param includeunused: Include the "usused" section of the tiki file? Default: False
"""
convertor = tiki2po(includeunused=includeunused)
inputstore = tiki.TikiStore(inputfile)
View
14 translate/filters/checks.py
@@ -26,7 +26,7 @@
(source, target) translation pairs.
When adding a new test here, please document and explain the behaviour on the
-U{wiki <http://translate.sourceforge.net/wiki/toolkit/pofilter_tests>}.
+`wiki <http://translate.sourceforge.net/wiki/toolkit/pofilter_tests>`_.
"""
import re
@@ -188,9 +188,9 @@ def __init__(self, targetlanguage=None, accelmarkers=None, varmatches=None,
def _init_list(self, list):
"""initialise configuration paramaters that are lists
- @type list: List
- @param list: None (we'll initialise a blank list) or a list paramater
- @rtype: List
+ :type list: List
+ :param list: None (we'll initialise a blank list) or a list paramater
+ :rtype: List
"""
if list is None:
list = []
@@ -199,9 +199,9 @@ def _init_list(self, list):
def _init_default(self, param, default):
"""initialise parameters that can have default options
- @param param: the user supplied paramater value
- @param default: default values when param is not specified
- @return: the paramater as specified by the user of the default settings
+ :param param: the user supplied paramater value
+ :param default: default values when param is not specified
+ :return: the paramater as specified by the user of the default settings
"""
if param is None:
return default
View
12 translate/filters/decoration.py
@@ -85,12 +85,12 @@ def ispurepunctuation(str1):
def isvalidaccelerator(accelerator, acceptlist=None):
"""returns whether the given accelerator character is valid
- @type accelerator: character
- @param accelerator: A character to be checked for accelerator validity
- @type acceptlist: String
- @param acceptlist: A list of characters that are permissible as accelerators
- @rtype: Boolean
- @return: True if the supplied character is an acceptable accelerator
+ :type accelerator: character
+ :param accelerator: A character to be checked for accelerator validity
+ :type acceptlist: String
+ :param acceptlist: A list of characters that are permissible as accelerators
+ :rtype: Boolean
+ :return: True if the supplied character is an acceptable accelerator
"""
assert isinstance(accelerator, unicode)
assert isinstance(acceptlist, unicode) or acceptlist is None
View
4 translate/lang/__init__.py
@@ -21,7 +21,7 @@
"""lang contains classes that represent languages and provides language specific
information.
-All classes inherit from the parent class called L{common}. The type of data
+All classes inherit from the parent class called :ref:`common`. The type of data
includes:
- language codes
- language name
@@ -29,6 +29,4 @@
- punctuation transformation
- etc
-@group Common Language Functionality: common data poedit identify factory
-@group Languages: *
"""
View
2  translate/lang/af.py
@@ -20,7 +20,7 @@
"""This module represents Afrikaans language.
-For more information, see U{http://en.wikipedia.org/wiki/Afrikaans_language}
+For more information, see http://en.wikipedia.org/wiki/Afrikaans_language
"""
import re
View
2  translate/lang/am.py
@@ -20,7 +20,7 @@
"""This module represents Amharic language.
-For more information, see U{http://en.wikipedia.org/wiki/Amharic_language}
+For more information, see http://en.wikipedia.org/wiki/Amharic_language
"""
import re
View
2  translate/lang/ar.py
@@ -20,7 +20,7 @@
"""This module represents Arabic language.
-For more information, see U{http://en.wikipedia.org/wiki/Arabic_language}
+For more information, see http://en.wikipedia.org/wiki/Arabic_language
"""
import re
View
2  translate/lang/bn.py
@@ -20,7 +20,7 @@
"""This module represents Bengali language.
-For more information, see U{http://en.wikipedia.org/wiki/Bengali_language}
+For more information, see http://en.wikipedia.org/wiki/Bengali_language
"""
import re
View
2  translate/lang/code_or.py
@@ -20,7 +20,7 @@
"""This module represents Oriya language.
-For more information, see U{http://en.wikipedia.org/wiki/Oriya_language}
+For more information, see http://en.wikipedia.org/wiki/Oriya_language
"""
import re
View
9 translate/lang/common.py
@@ -88,15 +88,18 @@ class Common(object):
0 is not a valid value - it must be overridden.
Any positive integer is valid (it should probably be between 1 and 6)
- @see: L{data}
+
+ .. seealso:: :ref:`data`
"""
pluralequation = "0"
"""The plural equation for selection of plural forms.
This is used for PO files to fill into the header.
- @see: U{Gettext manual<http://www.gnu.org/software/gettext/manual/html_node/gettext_150.html#Plural-forms>}
- @see: L{data}
+
+ .. seealso::
+
+ `Gettext manual <http://www.gnu.org/software/gettext/manual/html_node/gettext_150.html#Plural-forms>`_, :ref:`data`
"""
# Don't change these defaults of nplurals or pluralequation willy-nilly:
# some code probably depends on these for unrecognised languages
View
24 translate/lang/data.py
@@ -195,10 +195,10 @@ def simplercode(code):
codes, for example.
@see:
- - U{http://www.rfc-editor.org/rfc/bcp/bcp47.txt}
- - U{http://www.rfc-editor.org/rfc/rfc4646.txt}
- - U{http://www.rfc-editor.org/rfc/rfc4647.txt}
- - U{http://www.w3.org/International/articles/language-tags/}
+ - http://www.rfc-editor.org/rfc/bcp/bcp47.txt
+ - http://www.rfc-editor.org/rfc/rfc4646.txt
+ - http://www.rfc-editor.org/rfc/rfc4647.txt
+ - http://www.w3.org/International/articles/language-tags/
"""
if not code:
return code
@@ -248,7 +248,7 @@ def languagematch(languagecode, otherlanguagecode):
def tr_lang(langcode=None):
- """Gives a function that can translate a language name, even in the form C{"language (country)"},
+ """Gives a function that can translate a language name, even in the form ``"language (country)"``,
into the language with iso code langcode, or the system language if no language is specified."""
langfunc = gettext_lang(langcode)
countryfunc = gettext_country(langcode)
@@ -319,9 +319,9 @@ def gettext_country(langcode=None):
def normalize(string, normal_form="NFC"):
"""Return a unicode string in its normalized form
- @param string: The string to be normalized
- @param normal_form: NFC (default), NFD, NFKC, NFKD
- @return: Normalized string
+ :param string: The string to be normalized
+ :param normal_form: NFC (default), NFD, NFKC, NFKD
+ :return: Normalized string
"""
if string is None:
return None
@@ -333,10 +333,10 @@ def normalize(string, normal_form="NFC"):
def forceunicode(string):
"""Ensures that the string is in unicode.
- @param string: A text string
- @type string: Unicode, String
- @return: String converted to Unicode and normalized as needed.
- @rtype: Unicode
+ :param string: A text string
+ :type string: Unicode, String
+ :return: String converted to Unicode and normalized as needed.
+ :rtype: Unicode
"""
if string is None:
return None
View
2  translate/lang/de.py
@@ -20,7 +20,7 @@
"""This module represents German language.
-For more information, see U{http://en.wikipedia.org/wiki/German_language}
+For more information, see http://en.wikipedia.org/wiki/German_language
"""
from translate.lang import common
View
2  translate/lang/el.py
@@ -20,7 +20,7 @@
"""This module represents Greek language.
-For more information, see U{http://en.wikipedia.org/wiki/Greek_language}
+For more information, see http://en.wikipedia.org/wiki/Greek_language
"""
import re
View
2  translate/lang/fa.py
@@ -20,7 +20,7 @@
"""This module represents Persian language.
-For more information, see U{http://en.wikipedia.org/wiki/Persian_language}
+For more information, see http://en.wikipedia.org/wiki/Persian_language
"""
from translate.lang import common
View
2  translate/lang/factory.py
@@ -29,7 +29,7 @@
def getlanguage(code):
"""This returns a language class.
- @param code: The ISO 639 language code
+ :param code: The ISO 639 language code
"""
if code:
code = code.replace("-", "_").replace("@", "_").lower()
View
2  translate/lang/fr.py
@@ -20,7 +20,7 @@
"""This module represents French language.
-For more information, see U{http://en.wikipedia.org/wiki/French_language}
+For more information, see http://en.wikipedia.org/wiki/French_language
"""
import re
View
2  translate/lang/gu.py
@@ -20,7 +20,7 @@
"""This module represents Gujarati language.
-For more information, see U{http://en.wikipedia.org/wiki/Gujarati_language}
+For more information, see http://en.wikipedia.org/wiki/Gujarati_language
"""
from translate.lang import common
View
2  translate/lang/he.py
@@ -20,7 +20,7 @@
"""This module represents Hebrew language.
-For more information, see U{http://en.wikipedia.org/wiki/Hebrew_language}
+For more information, see http://en.wikipedia.org/wiki/Hebrew_language
"""
from translate.lang import common
View
2  translate/lang/hi.py
@@ -20,7 +20,7 @@
"""This module represents Hindi language.
-For more information, see U{http://en.wikipedia.org/wiki/Hindi_language}
+For more information, see http://en.wikipedia.org/wiki/Hindi_language
"""
from translate.lang import common
View
2  translate/lang/hy.py
@@ -20,7 +20,7 @@
"""This module represents Armenian language.
-For more information, see U{http://en.wikipedia.org/wiki/Armenian_language}
+For more information, see http://en.wikipedia.org/wiki/Armenian_language
"""
import re
View
18 translate/lang/identify.py
@@ -36,7 +36,7 @@ class LanguageIdentifier(object):
CONF_FILE = 'fpdb.conf'
"""
The name of the file that contains language name-code pairs
- (relative to C{MODEL_DIR}).
+ (relative to ``MODEL_DIR``).
"""
def __init__(self, model_dir=None, conf_file=None):
@@ -90,10 +90,10 @@ def identify_source_lang(self, instore):
"""Identify the source language of the given translation store or
units.
- @type instore: C{TranslationStore} or list or tuple of
- C{TranslationUnit}s.
- @param instore: The translation store to extract source text from.
- @returns: The identified language's code or C{None} if the language
+ :type instore: ``TranslationStore`` or list or tuple of
+ ``TranslationUnit``s.
+ :param instore: The translation store to extract source text from.
+ @returns: The identified language's code or ``None`` if the language
could not be identified."""
if not isinstance(instore, (TranslationStore, list, tuple)):
return None
@@ -107,10 +107,10 @@ def identify_target_lang(self, instore):
"""Identify the target language of the given translation store or
units.
- @type instore: C{TranslationStore} or list or tuple of
- C{TranslationUnit}s.
- @param instore: The translation store to extract target text from.
- @returns: The identified language's code or C{None} if the language
+ :type instore: ``TranslationStore`` or list or tuple of
+ ``TranslationUnit``s.
+ :param instore: The translation store to extract target text from.
+ @returns: The identified language's code or ``None`` if the language
could not be identified."""
if not isinstance(instore, (TranslationStore, list, tuple)):
return None
View
2  translate/lang/ja.py
@@ -20,7 +20,7 @@
"""This module represents Japanese language.
-For more information, see U{http://en.wikipedia.org/wiki/Japanese_language}
+For more information, see http://en.wikipedia.org/wiki/Japanese_language
"""
import re
View
2  translate/lang/km.py
@@ -20,7 +20,7 @@
"""This module represents Khmer language.
-For more information, see U{http://en.wikipedia.org/wiki/Khmer_language}
+For more information, see http://en.wikipedia.org/wiki/Khmer_language
"""
import re
View
2  translate/lang/kn.py
@@ -20,7 +20,7 @@
"""This module represents Kannada language.
-For more information, see U{http://en.wikipedia.org/wiki/Kannada_language}
+For more information, see http://en.wikipedia.org/wiki/Kannada_language
"""
from translate.lang import common
View
2  translate/lang/ko.py
@@ -20,7 +20,7 @@
"""This module represents Korean language.
-For more information, see U{http://en.wikipedia.org/wiki/Korean_language}
+For more information, see http://en.wikipedia.org/wiki/Korean_language
"""
from translate.lang import common
View
2  translate/lang/ml.py
@@ -20,7 +20,7 @@
"""This module represents Malayalam language.
-For more information, see U{http://en.wikipedia.org/wiki/Malayalam_language}
+For more information, see http://en.wikipedia.org/wiki/Malayalam_language
"""
from translate.lang import common
View
2  translate/lang/mr.py
@@ -20,7 +20,7 @@
"""This module represents Marathi language.
-For more information, see U{http://en.wikipedia.org/wiki/Marathi_language}
+For more information, see http://en.wikipedia.org/wiki/Marathi_language
"""
from translate.lang import common
View
2  translate/lang/ne.py
@@ -20,7 +20,7 @@
"""This module represents Nepali language.
-For more information, see U{http://en.wikipedia.org/wiki/Nepali_language}
+For more information, see http://en.wikipedia.org/wiki/Nepali_language
"""
import re
View
2  translate/lang/pa.py
@@ -20,7 +20,7 @@
"""This module represents Punjabi language.
-For more information, see U{http://en.wikipedia.org/wiki/Punjabi_language}
+For more information, see http://en.wikipedia.org/wiki/Punjabi_language
"""
import re
View
18 translate/lang/poedit.py
@@ -20,7 +20,7 @@
"""Functions to manage Poedit's language features.
- ISO 639 maps are form Poedit's U{isocode.cpp 1.4.2<http://poedit.svn.sourceforge.net/viewvc/poedit/poedit/tags/release-1.4.2/src/isocodes.cpp?revision=1452&view=markup>}
+ ISO 639 maps are from Poedit's `isocodes.cpp 1.4.2 <http://poedit.svn.sourceforge.net/viewvc/poedit/poedit/tags/release-1.4.2/src/isocodes.cpp?revision=1452&view=markup>`_
to ensure that we match currently released versions of Poedit.
"""
@@ -195,7 +195,7 @@
Mostly these are identical to ISO 639, but there are some differences."""
lang_names = dict([(value, key) for (key, value) in lang_codes.items()])
-"""Reversed L{lang_codes}"""
+"""Reversed :ref:`lang_codes`"""
dialects = {
"Portuguese": {"PORTUGAL": "pt", "BRAZIL": "pt_BR", "None": "pt"},
@@ -215,15 +215,15 @@ def isocode(language, country=None):
- X-Poedit-Country
This function converts the supplied language name into the required ISO 639
- code. If needed, in the case of L{dialects}, the country name is used
+ code. If needed, in the case of ``dialects``, the country name is used
to create an xx_YY style dialect code.
- @param language: Language name
- @type language: String
- @param country: Country name
- @type country: String
- @return: ISO 639 language code
- @rtype: String
+ :param language: Language name
+ :type language: String
+ :param country: Country name
+ :type country: String
+ :return: ISO 639 language code
+ :rtype: String
"""
dialect = dialects.get(language, None)
if dialect:
View
2  translate/lang/si.py
@@ -20,7 +20,7 @@
"""This module represents Sinhala language.
-For more information, see U{http://en.wikipedia.org/wiki/Sinhala_language}
+For more information, see http://en.wikipedia.org/wiki/Sinhala_language
"""
from translate.lang import common
View
2  translate/lang/sv.py
@@ -20,7 +20,7 @@
"""This module represents the Swedish language.
-For more information, see U{http://en.wikipedia.org/wiki/Swedish_language}
+For more information, see http://en.wikipedia.org/wiki/Swedish_language
"""
from translate.lang import common
View
2  translate/lang/ta.py
@@ -20,7 +20,7 @@
"""This module represents Tamil language.
-For more information, see U{http://en.wikipedia.org/wiki/Tamil_language}
+For more information, see http://en.wikipedia.org/wiki/Tamil_language
"""
from translate.lang import common
View
2  translate/lang/te.py
@@ -20,7 +20,7 @@
"""This module represents Telugu language.
-For more information, see U{http://en.wikipedia.org/wiki/Telugu_language}
+For more information, see http://en.wikipedia.org/wiki/Telugu_language
"""
from translate.lang import common
View
16 translate/lang/team.py
@@ -356,13 +356,13 @@
def _regex_guesser(prefilter, regex, string, postfilter=None):
"""Use regular expressions to extract the language team
- @param prefilter: simple filter to apply before attempting the regex
- @param regex: regular expression with one group that will contain
+ :param prefilter: simple filter to apply before attempting the regex
+ :param regex: regular expression with one group that will contain
the language code
- @param string: the language team string that should be examined
- @param postfilter: filter to apply to reject any potential matches
+ :param string: the language team string that should be examined
+ :param postfilter: filter to apply to reject any potential matches
after they have been retrieved by the regex
- @return: ISO language code for the found language
+ :return: ISO language code for the found language
"""
# TODO instead of a posfilter, have a dictionary of transform rules
# e.g. for debian-l10n-albanian a dict of {'russian': 'ru' would allow
@@ -396,10 +396,10 @@ def _snippet_guesser(snippets_dict, string, filter_=_nofilter):
"""Guess the language based on a snippet of text in the language team
string.
- @param snippets_dict: A dict of snippets that can be used to identify a
+ :param snippets_dict: A dict of snippets that can be used to identify a
language in the format {'lang': ('snippet1', 'snippet2'), 'lang2'...}
- @param string: The language string to be analysed
- @param filter_: a function to be applied to the string and snippets
+ :param string: The language string to be analysed
+ :param filter_: a function to be applied to the string and snippets
before examination
"""
string = filter_(string)
View
2  translate/lang/th.py
@@ -20,7 +20,7 @@
"""This module represents Thai language.
-For more information, see U{http://en.wikipedia.org/wiki/Thai_language}
+For more information, see http://en.wikipedia.org/wiki/Thai_language
"""
from translate.lang import common
View
2  translate/lang/ug.py
@@ -20,7 +20,7 @@
"""This module represents Uyghur language.
-For more information, see U{http://en.wikipedia.org/wiki/Uyghur_language}
+For more information, see http://en.wikipedia.org/wiki/Uyghur_language
"""
from translate.lang import common
View
2  translate/lang/ur.py
@@ -20,7 +20,7 @@
"""This module represents Urdu language.
-For more information, see U{http://en.wikipedia.org/wiki/Urdu_language}
+For more information, see http://en.wikipedia.org/wiki/Urdu_language
"""
from translate.lang import common
View
2  translate/lang/vi.py
@@ -20,7 +20,7 @@
"""This module represents Vietnamese language.
-For more information, see U{http://en.wikipedia.org/wiki/Vietnamese_language}
+For more information, see http://en.wikipedia.org/wiki/Vietnamese_language
"""
from translate.lang import common
View
2  translate/lang/zh.py
@@ -20,7 +20,7 @@
"""This module represents Chinese language. (Both tradisional and simplified)
-For more information, see U{http://en.wikipedia.org/wiki/Chinese_language}
+For more information, see http://en.wikipedia.org/wiki/Chinese_language
"""
import re
View
4 translate/misc/file_discovery.py
@@ -29,8 +29,8 @@ def get_abs_data_filename(path_parts, basedirs=None):
"""Get the absolute path to the given file- or directory name in the current
running application's data directory.
- @type path_parts: list
- @param path_parts: The path parts that can be joined by os.path.join().
+ :type path_parts: list
+ :param path_parts: The path parts that can be joined by os.path.join().
"""
if basedirs is None:
basedirs = []
View
20 translate/misc/optrecurse.py
@@ -80,8 +80,8 @@ def __init__(self, formats, usetemplates=False, allowmissingtemplate=False,
description=None):
"""Construct the specialized Option Parser.
- @type formats: Dictionary
- @param formats: See L{setformats()} for an explanation of the formats
+ :type formats: Dictionary
+ :param formats: See ``setformats()`` for an explanation of the formats
parameter.
"""
@@ -240,14 +240,14 @@ def define_option(self, option):
def setformats(self, formats, usetemplates):
"""Sets the format options using the given format dictionary.
- @type formats: Dictionary
- @param formats: The dictionary I{keys} should be:
+ :type formats: Dictionary
+ :param formats: The dictionary *keys* should be:
- single strings (or 1-tuples) containing an input format (if not
usetemplates)
- tuples containing an input format and template format (if
usetemplates)
- formats can be None to indicate what to do with standard input
- The dictionary I{values} should be tuples of outputformat (string) and
+ The dictionary *values* should be tuples of outputformat (string) and
processor method.
"""
@@ -665,12 +665,12 @@ def recurseinputfiles(self, options):
return inputfiles
def splitext(self, pathname):
- """Splits L{pathname} into name and ext, and removes the extsep
+ """Splits :ref:`pathname` into name and ext, and removes the extsep
- @param pathname: A file path
- @type pathname: string
- @return: root, ext
- @rtype: tuple
+ :param pathname: A file path
+ :type pathname: string
+ :return: root, ext
+ :rtype: tuple
"""
root, ext = os.path.splitext(pathname)
ext = ext.replace(os.extsep, "", 1)
View
2  translate/misc/test_optrecurse.py
@@ -11,7 +11,7 @@ def __init__(self):
self.parser = optrecurse.RecursiveOptionParser({"txt": ("po", None)})
def test_splitext(self):
- """test the L{optrecurse.splitext} function"""
+ """test the :ref:`optrecurse.splitext` function"""
name = "name"
extension = "ext"
filename = name + os.extsep + extension
View
200 translate/search/indexing/CommonIndexer.py
@@ -36,8 +36,8 @@ def is_available():
this function must exist in every module that contains indexing engine
interfaces
- @return: is this interface usable?
- @rtype: bool
+ :return: is this interface usable?
+ :rtype: bool
"""
return False
@@ -84,15 +84,15 @@ def __init__(self, basedir, analyzer=None, create_allowed=True):
is incompatible (e.g. created by a different indexing engine)
@raise OSError: the database failed to initialize
- @param basedir: the parent directory of the database
- @type basedir: str
- @param analyzer: bitwise combination of possible analyzer flags
+ :param basedir: the parent directory of the database
+ :type basedir: str
+ :param analyzer: bitwise combination of possible analyzer flags
to be used as the default analyzer for this database. Leave it empty
to use the system default analyzer (self.ANALYZER_DEFAULT).
see self.ANALYZER_TOKENIZE, self.ANALYZER_PARTIAL, ...
- @type analyzer: int
- @param create_allowed: create the database, if necessary; default: True
- @type create_allowed: bool
+ :type analyzer: int
+ :param create_allowed: create the database, if necessary; default: True
+ :type create_allowed: bool
"""
# just do some checks
if self.QUERY_TYPE is None:
@@ -117,8 +117,8 @@ def flush(self, optimize=False):
some databases also support index optimization
- @param optimize: should the index be optimized if possible?
- @type optimize: bool
+ :param optimize: should the index be optimized if possible?
+ :type optimize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'flush' is missing")
@@ -131,25 +131,25 @@ def make_query(self, args, require_all=True, analyzer=None):
'set_field_analyzers'. The parameter 'match_text_partial' can override
the previously defined default setting.
- @param args: queries or search string or description of field query
+ :param args: queries or search string or description of field query
examples::
[xapian.Query("foo"), xapian.Query("bar")]
xapian.Query("foo")
"bar"
{"foo": "bar", "foobar": "foo"}
- @type args: list of queries | single query | str | dict
- @param require_all: boolean operator
+ :type args: list of queries | single query | str | dict
+ :param require_all: boolean operator
(True -> AND (default) / False -> OR)
- @type require_all: boolean
- @param analyzer: (only applicable for 'dict' or 'str')
+ :type require_all: boolean
+ :param analyzer: (only applicable for 'dict' or 'str')
Define query options (partial matching, exact matching, tokenizing,
...) as bitwise combinations of CommonIndexer.ANALYZER_???.
This can override previously defined field analyzer settings.
If analyzer is None (default), then the configured analyzer for the
field is used.
- @type analyzer: int
- @return: the combined query
- @rtype: query type of the specific implemention
+ :type analyzer: int
+ :return: the combined query
+ :rtype: query type of the specific implementation
"""
# turn a dict into a list if necessary
if isinstance(args, dict):
@@ -194,10 +194,10 @@ def _create_query_for_query(self, query):
basically this function should just create a copy of the original
- @param query: the original query object
- @type query: xapian.Query
- @return: the resulting query object
- @rtype: xapian.Query | PyLucene.Query
+ :param query: the original query object
+ :type query: xapian.Query
+ :return: the resulting query object
+ :rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_query' is missing")
@@ -209,20 +209,20 @@ def _create_query_for_string(self, text, require_all=True,
basically this function parses the string and returns the resulting
query
- @param text: the query string
- @type text: str
- @param require_all: boolean operator
+ :param text: the query string
+ :type text: str
+ :param require_all: boolean operator
(True -> AND (default) / False -> OR)
- @type require_all: bool
- @param analyzer: Define query options (partial matching, exact matching,
+ :type require_all: bool
+ :param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
CommonIndexer.ANALYZER_???.
This can override previously defined field analyzer settings.
If analyzer is None (default), then the configured analyzer for the
field is used.
- @type analyzer: int
- @return: resulting query object
- @rtype: xapian.Query | PyLucene.Query
+ :type analyzer: int
+ :return: resulting query object
+ :rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_string' is missing")
@@ -232,19 +232,19 @@ def _create_query_for_field(self, field, value, analyzer=None):
this functions creates a field->value query
- @param field: the fieldname to be used
- @type field: str
- @param value: the wanted value of the field
- @type value: str
- @param analyzer: Define query options (partial matching, exact matching,
+ :param field: the fieldname to be used
+ :type field: str
+ :param value: the wanted value of the field
+ :type value: str
+ :param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
CommonIndexer.ANALYZER_???.
This can override previously defined field analyzer settings.
If analyzer is None (default), then the configured analyzer for the
field is used.
- @type analyzer: int
- @return: resulting query object
- @rtype: xapian.Query | PyLucene.Query
+ :type analyzer: int
+ :return: resulting query object
+ :rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_field' is missing")
@@ -252,13 +252,13 @@ def _create_query_for_field(self, field, value, analyzer=None):
def _create_query_combined(self, queries, require_all=True):
"""generate a combined query
- @param queries: list of the original queries
- @type queries: list of xapian.Query
- @param require_all: boolean operator
+ :param queries: list of the original queries
+ :type queries: list of xapian.Query
+ :param require_all: boolean operator
(True -> AND (default) / False -> OR)
- @type require_all: bool
- @return: the resulting combined query object
- @rtype: xapian.Query | PyLucene.Query
+ :type require_all: bool
+ :return: the resulting combined query object
+ :rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_combined' is missing")
@@ -266,13 +266,13 @@ def _create_query_combined(self, queries, require_all=True):
def index_document(self, data):
"""add the given data to the database
- @param data: the data to be indexed.
+ :param data: the data to be indexed.
A dictionary will be treated as fieldname:value combinations.
If the fieldname is None then the value will be interpreted as a
plain term or as a list of plain terms.
Lists of terms are indexed separately.
Lists of strings are treated as plain terms.
- @type data: dict | list of str
+ :type data: dict | list of str
"""
doc = self._create_empty_document()
if isinstance(data, dict):
@@ -312,8 +312,8 @@ def index_document(self, data):
def _create_empty_document(self):
"""create an empty document to be filled and added to the index later
- @return: the new document object
- @rtype: xapian.Document | PyLucene.Document
+ :return: the new document object
+ :rtype: xapian.Document | PyLucene.Document
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_empty_document' is missing")
@@ -321,12 +321,12 @@ def _create_empty_document(self):
def _add_plain_term(self, document, term, tokenize=True):
"""add a term to a document
- @param document: the document to be changed
- @type document: xapian.Document | PyLucene.Document
- @param term: a single term to be added
- @type term: str
- @param tokenize: should the term be tokenized automatically
- @type tokenize: bool
+ :param document: the document to be changed
+ :type document: xapian.Document | PyLucene.Document
+ :param term: a single term to be added
+ :type term: str
+ :param tokenize: should the term be tokenized automatically
+ :type tokenize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_plain_term' is missing")
@@ -334,14 +334,14 @@ def _add_plain_term(self, document, term, tokenize=True):
def _add_field_term(self, document, field, term, tokenize=True):
"""add a field term to a document
- @param document: the document to be changed
- @type document: xapian.Document | PyLucene.Document
- @param field: name of the field
- @type field: str
- @param term: term to be associated to the field
- @type term: str
- @param tokenize: should the term be tokenized automatically
- @type tokenize: bool
+ :param document: the document to be changed
+ :type document: xapian.Document | PyLucene.Document
+ :param field: name of the field
+ :type field: str
+ :param term: term to be associated to the field
+ :type term: str
+ :param tokenize: should the term be tokenized automatically
+ :type tokenize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_field_term' is missing")
@@ -349,8 +349,8 @@ def _add_field_term(self, document, field, term, tokenize=True):
def _add_document_to_index(self, document):
"""add a prepared document to the index database
- @param document: the document to be added
- @type document: xapian.Document | PyLucene.Document
+ :param document: the document to be added
+ :type document: xapian.Document | PyLucene.Document
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_document_to_index' is missing")
@@ -388,10 +388,10 @@ def commit_transaction(self):
def get_query_result(self, query):
"""return an object containing the results of a query
- @param query: a pre-compiled query
- @type query: a query object of the real implementation
- @return: an object that allows access to the results
- @rtype: subclass of CommonEnquire
+ :param query: a pre-compiled query
+ :type query: a query object of the real implementation
+ :return: an object that allows access to the results
+ :rtype: subclass of CommonEnquire
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'get_query_result' is missing")
@@ -399,8 +399,8 @@ def get_query_result(self, query):
def delete_document_by_id(self, docid):
"""delete a specified document
- @param docid: the document ID to be deleted
- @type docid: int
+ :param docid: the document ID to be deleted
+ :type docid: int
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'delete_document_by_id' is missing")
@@ -409,12 +409,12 @@ def search(self, query, fieldnames):
"""return a list of the contents of specified fields for all matches of
a query
- @param query: the query to be issued
- @type query: a query object of the real implementation
- @param fieldnames: the name(s) of a field of the document content
- @type fieldnames: string | list of strings
- @return: a list of dicts containing the specified field(s)
- @rtype: list of dicts
+ :param query: the query to be issued
+ :type query: a query object of the real implementation
+ :param fieldnames: the name(s) of a field of the document content
+ :type fieldnames: string | list of strings
+ :return: a list of dicts containing the specified field(s)
+ :rtype: list of dicts
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'search' is missing")
@@ -422,8 +422,8 @@ def search(self, query, fieldnames):
def delete_doc(self, ident):
"""delete the documents returned by a query
- @param ident: [list of] document IDs | dict describing a query | query
- @type ident: int | list of tuples | dict | list of dicts |
+ :param ident: [list of] document IDs | dict describing a query | query
+ :type ident: int | list of tuples | dict | list of dicts |
query (e.g. xapian.Query) | list of queries
"""
# turn a doc-ID into a list of doc-IDs
@@ -473,12 +473,12 @@ def _walk_matches(self, query, function, arg_for_function=None):
self._walk_matches(query, function_for_match, arg_for_func)
'function_for_match' expects only one argument: the matched object
- @param query: a query object of the real implementation
- @type query: xapian.Query | PyLucene.Query
- @param function: the function to execute with every match
- @type function: function
- @param arg_for_function: an optional argument for the function
- @type arg_for_function: anything
+ :param query: a query object of the real implementation
+ :type query: xapian.Query | PyLucene.Query
+ :param function: the function to execute with every match
+ :type function: function
+ :param arg_for_function: an optional argument for the function
+ :type arg_for_function: anything
"""
# execute the query
enquire = self.get_query_result(query)
@@ -502,8 +502,8 @@ def set_field_analyzers(self, field_analyzers):
All bitwise combinations of CommonIndexer.ANALYZER_??? are possible.
- @param field_analyzers: mapping of field names and analyzers
- @type field_analyzers: dict containing field names and analyzers
+ :param field_analyzers: mapping of field names and analyzers
+ :type field_analyzers: dict containing field names and analyzers
@raise TypeError: invalid values in 'field_analyzers'
"""
for field, analyzer in field_analyzers.items():
@@ -520,12 +520,12 @@ def get_field_analyzers(self, fieldnames=None):
see 'set_field_analyzers' for details
- @param fieldnames: the analyzer of this field (or all/multiple fields)
+ :param fieldnames: the analyzer of this field (or all/multiple fields)
is requested; leave empty (or "None") to request all fields
- @type fieldnames: str | list of str | None
- @return: the analyzer setting of the field - see
+ :type fieldnames: str | list of str | None
+ :return: the analyzer setting of the field - see
CommonDatabase.ANALYZER_??? or a dict of field names and analyzers
- @rtype: int | dict
+ :rtype: int | dict
"""
# all field analyzers are requested
if fieldnames is None:
@@ -569,20 +569,20 @@ class CommonEnquire(object):
def __init__(self, enquire):
"""intialization of a wrapper around enquires of different backends
- @param enquire: a previous enquire
- @type enquire: xapian.Enquire | pylucene-enquire
+ :param enquire: a previous enquire
+ :type enquire: xapian.Enquire | pylucene-enquire
"""
self.enquire = enquire
def get_matches(self, start, number):
"""return a specified number of qualified matches of a previous query
- @param start: index of the first match to return (starting from zero)
- @type start: int
- @param number: the number of matching entries to return
- @type number: int
- @return: a set of matching entries and some statistics
- @rtype: tuple of (returned number, available number, matches)
+ :param start: index of the first match to return (starting from zero)
+ :type start: int
+ :param number: the number of matching entries to return
+ :type number: int
+ :return: a set of matching entries and some statistics
+ :rtype: tuple of (returned number, available number, matches)
"matches" is a dictionary of::
["rank", "percent", "document", "docid"]
"""
@@ -593,8 +593,8 @@ def get_matches_count(self):
"""return the estimated number of matches
use "CommonIndexer.search" to retrieve the exact number of matches
- @return: the estimaed number of matches
- @rtype: int
+ :return: the estimated number of matches
+ :rtype: int
"""
(returned, estimate_count, matches) = self.get_matches(0, 1)
return estimate_count
View
156 translate/search/indexing/PyLuceneIndexer.py
@@ -71,15 +71,15 @@ def __init__(self, basedir, analyzer=None, create_allowed=True):
is incompatible (e.g. created by a different indexing engine)
@raise OSError: the database failed to initialize
- @param basedir: the parent directory of the database
- @type basedir: str
- @param analyzer: bitwise combination of possible analyzer flags
+ :param basedir: the parent directory of the database
+ :type basedir: str
+ :param analyzer: bitwise combination of possible analyzer flags
to be used as the default analyzer for this database. Leave it empty
to use the system default analyzer (self.ANALYZER_DEFAULT).
see self.ANALYZER_TOKENIZE, self.ANALYZER_PARTIAL, ...
- @type analyzer: int
- @param create_allowed: create the database, if necessary; default: True
- @type create_allowed: bool
+ :type analyzer: int
+ :param create_allowed: create the database, if necessary; default: True
+ :type create_allowed: bool
"""
jvm = PyLucene.getVMEnv()
jvm.attachCurrentThread()
@@ -164,8 +164,8 @@ def flush(self, optimize=False):
some databases also support index optimization
- @param optimize: should the index be optimized if possible?
- @type optimize: bool
+ :param optimize: should the index be optimized if possible?
+ :type optimize: bool
"""
keep_open = self._writer_is_open()
self._writer_open()
@@ -187,10 +187,10 @@ def _create_query_for_query(self, query):
basically this function should just create a copy of the original
- @param query: the original query object
- @type query: PyLucene.Query
- @return: resulting query object
- @rtype: PyLucene.Query
+ :param query: the original query object
+ :type query: PyLucene.Query
+ :return: resulting query object
+ :rtype: PyLucene.Query
"""
# TODO: a deep copy or a clone would be safer
# somehow not working (returns "null"): copy.deepcopy(query)
@@ -203,22 +203,22 @@ def _create_query_for_string(self, text, require_all=True,
basically this function parses the string and returns the resulting
query
- @param text: the query string
- @type text: str
- @param require_all: boolean operator
+ :param text: the query string
+ :type text: str
+ :param require_all: boolean operator
(True -> AND (default) / False -> OR)
- @type require_all: bool
- @param analyzer: the analyzer to be used
+ :type require_all: bool
+ :param analyzer: the analyzer to be used
possible analyzers are:
- - L{CommonDatabase.ANALYZER_TOKENIZE}
+ - ``CommonDatabase.ANALYZER_TOKENIZE``
the field value is splitted to be matched word-wise
- - L{CommonDatabase.ANALYZER_PARTIAL}
+ - ``CommonDatabase.ANALYZER_PARTIAL``
the field value must start with the query string
- - L{CommonDatabase.ANALYZER_EXACT}
+ - ``CommonDatabase.ANALYZER_EXACT``
keep special characters and the like
- @type analyzer: bool
- @return: resulting query object
- @rtype: PyLucene.Query
+ :type analyzer: bool
+ :return: resulting query object
+ :rtype: PyLucene.Query
"""
if analyzer is None:
analyzer = self.analyzer
@@ -242,21 +242,21 @@ def _create_query_for_field(self, field, value, analyzer=None):
this functions creates a field->value query
- @param field: the fieldname to be used
- @type field: str
- @param value: the wanted value of the field
- @type value: str
- @param analyzer: the analyzer to be used
+ :param field: the fieldname to be used
+ :type field: str
+ :param value: the wanted value of the field
+ :type value: str
+ :param analyzer: the analyzer to be used
possible analyzers are:
- - L{CommonDatabase.ANALYZER_TOKENIZE}
+ - ``CommonDatabase.ANALYZER_TOKENIZE``
the field value is splitted to be matched word-wise
- - L{CommonDatabase.ANALYZER_PARTIAL}
+ - ``CommonDatabase.ANALYZER_PARTIAL``
the field value must start with the query string
- - L{CommonDatabase.ANALYZER_EXACT}
+ - ``CommonDatabase.ANALYZER_EXACT``
keep special characters and the like
- @type analyzer: bool
- @return: resulting query object
- @rtype: PyLucene.Query
+ :type analyzer: bool
+ :return: resulting query object
+ :rtype: PyLucene.Query
"""
if analyzer is None:
analyzer = self.analyzer
@@ -274,13 +274,13 @@ def _create_query_for_field(self, field, value, analyzer=None):
def _create_query_combined(self, queries, require_all=True):
"""generate a combined query
- @param queries: list of the original queries
- @type queries: list of PyLucene.Query
- @param require_all: boolean operator
+ :param queries: list of the original queries
+ :type queries: list of PyLucene.Query
+ :param require_all: boolean operator
(True -> AND (default) / False -> OR)