Permalink
Browse files

Added an example extension that uses the stream filtering, and added unit tests.

--HG--
branch : trunk
  • Loading branch information...
1 parent 3e3a9be commit d02fc7db91a3f67f04489ddade7d9387d90eeab1 @mitsuhiko mitsuhiko committed Jun 14, 2008
Showing with 100 additions and 8 deletions.
  1. +5 −0 docs/extensions.rst
  2. +78 −0 ext/inlinegettext.py
  3. +6 −1 jinja2/ext.py
  4. +7 −0 jinja2/lexer.py
  5. +4 −7 tests/test_ext.py
View
@@ -229,6 +229,11 @@ extensions:
The value of the token.
+There is also a utility function in the lexer module that can count newline
+characters in strings:
+
+.. autofunction:: jinja2.lexer.count_newlines
+
AST
~~~
View
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+"""
+ Inline Gettext
+ ~~~~~~~~~~~~~~
+
+ An example extension for Jinja2 that supports inline gettext calls.
+ Requires the i18n extension to be loaded.
+
+ :copyright: Copyright 2008 by Armin Ronacher.
+ :license: BSD.
+"""
+import re
+from jinja2.ext import Extension
+from jinja2.lexer import Token, count_newlines
+from jinja2.exceptions import TemplateSyntaxError
+
+
+# Outside a translation block: match a (possibly backslash-escaped) opening
+# "gettext(" or "_(" call marker.
+_outside_re = re.compile(r'\\?(gettext|_)\(')
+# Inside a translation block: match any (possibly backslash-escaped) paren
+# so nesting depth can be tracked.
+_inside_re = re.compile(r'\\?[()]')
+
+
class InlineGettext(Extension):
    """This extension implements support for inline gettext blocks::

        <h1>_(Welcome)</h1>
        <p>_(This is a paragraph)</p>

    Requires the i18n extension to be loaded and configured.
    """

    def filter_stream(self, stream):
        """Scan ``data`` tokens for inline ``_(...)`` / ``gettext(...)``
        markers and rewrite them into ``{% trans %}`` ... ``{% endtrans %}``
        token sequences.  All non-``data`` tokens pass through untouched.

        :param stream: the :class:`~jinja2.lexer.TokenStream` to filter.
        :raises TemplateSyntaxError: if a gettext expression is opened but
            never closed before the stream ends.
        """
        # Parenthesis nesting depth inside a gettext expression;
        # 0 means we are outside of any translation block.
        paren_stack = 0

        for token in stream:
            # NOTE: was ``token.type is not 'data'`` — identity comparison
            # against a str literal only works via CPython interning and is
            # a SyntaxWarning on modern Pythons; use equality instead.
            if token.type != 'data':
                yield token
                continue

            pos = 0
            lineno = token.lineno

            while True:
                # Outside a block we search for the opening call marker;
                # inside we only track (possibly escaped) parentheses.
                if not paren_stack:
                    match = _outside_re.search(token.value, pos)
                else:
                    match = _inside_re.search(token.value, pos)
                if match is None:
                    break
                new_pos = match.start()
                if new_pos > pos:
                    # Emit the literal data preceding the match and keep
                    # the line counter in sync across embedded newlines.
                    preval = token.value[pos:new_pos]
                    yield Token(lineno, 'data', preval)
                    lineno += count_newlines(preval)
                gtok = match.group()
                if gtok[0] == '\\':
                    # Escaped marker: emit literally, minus the backslash.
                    yield Token(lineno, 'data', gtok[1:])
                elif not paren_stack:
                    # Opening call found: start a {% trans %} block.
                    yield Token(lineno, 'block_begin', None)
                    yield Token(lineno, 'name', 'trans')
                    yield Token(lineno, 'block_end', None)
                    paren_stack = 1
                else:
                    # Nested parens remain part of the translated text;
                    # only the final closing paren terminates the block.
                    if gtok == '(' or paren_stack > 1:
                        yield Token(lineno, 'data', gtok)
                    paren_stack += -1 if gtok == ')' else 1
                    if not paren_stack:
                        yield Token(lineno, 'block_begin', None)
                        yield Token(lineno, 'name', 'endtrans')
                        yield Token(lineno, 'block_end', None)
                pos = match.end()

            if pos < len(token.value):
                # Trailing data after the last match.
                yield Token(lineno, 'data', token.value[pos:])

        if paren_stack:
            raise TemplateSyntaxError('unclosed gettext expression',
                                      token.lineno, stream.name,
                                      stream.filename)
View
@@ -16,7 +16,6 @@
from jinja2.environment import get_spontaneous_environment
from jinja2.runtime import Undefined, concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
-from jinja2.lexer import Token
from jinja2.utils import contextfunction, import_string, Markup
@@ -80,6 +79,10 @@ def filter_stream(self, stream):
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
+
+ In the `ext` folder of the Jinja2 source distribution there is a file
+ called `inlinegettext.py` which implements a filter that utilizes this
+ method.
"""
return stream
@@ -261,6 +264,8 @@ def _parse_block(self, parser, allow_pluralize):
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
+ elif parser.stream.eos:
+ parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
View
@@ -71,6 +71,13 @@
sorted(operators, key=lambda x: -len(x))))
def count_newlines(value):
    """Return how many newline sequences occur in *value*.

    Handy for stream-filtering extensions that must keep token line
    numbers accurate while splitting ``data`` tokens.
    """
    return sum(1 for _ in newline_re.finditer(value))
+
+
class Failure(object):
"""Class that raises a `TemplateSyntaxError` if called.
Used by the `Lexer` to specify known errors.
View
@@ -8,14 +8,14 @@
"""
import re
from jinja2 import Environment, nodes
-from jinja2.ext import Extension, Token
+from jinja2.ext import Extension
+from jinja2.lexer import Token, count_newlines
importable_object = 23
-_line_re = re.compile(r'(\r\n|\r|\n)')
-_gettext_re = re.compile(r'_\((([^)\\]*(?:\\.[^)\\]*)*))\)(?s)')
+_gettext_re = re.compile(r'_\((.*?)\)')
class TestExtension(Extension):
@@ -55,9 +55,6 @@ def filter_stream(self, stream):
else:
yield token
- def count_lines(self, value):
- return len(_line_re.findall(value))
-
def interpolate(self, token):
pos = 0
end = len(token.value)
@@ -69,7 +66,7 @@ def interpolate(self, token):
value = token.value[pos:match.start()]
if value:
yield Token(lineno, 'data', value)
- lineno += self.count_lines(token.value)
+ lineno += count_newlines(token.value)
yield Token(lineno, 'variable_begin', None)
yield Token(lineno, 'name', 'gettext')
yield Token(lineno, 'lparen', None)

0 comments on commit d02fc7d

Please sign in to comment.