# -*- coding: utf-8 -*-
"""
    jinja2.lexer
    ~~~~~~~~~~~~

    This module implements a Jinja / Python combination lexer. The
    `Lexer` class provided by this module is used to do some preprocessing
    for Jinja.

    On the one hand it filters out invalid operators like the bitshift
    operators we don't allow in templates. On the other hand it separates
    template code and python code in expressions.

    :copyright: (c) 2010 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
"""
import re

from operator import itemgetter
from collections import deque
from jinja2.exceptions import TemplateSyntaxError
from jinja2.utils import LRUCache
from jinja2._compat import iteritems, implements_iterator, text_type, \
    intern, PY2


# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)

# static regular expressions
whitespace_re = re.compile(r'\s+', re.U)
string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
                       r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
integer_re = re.compile(r'\d+')

# we use the unicode identifier rule if this python version is able
# to handle unicode identifiers, otherwise the standard ASCII one.
try:
    compile('föö', '<unknown>', 'eval')
except SyntaxError:
    name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
else:
    from jinja2 import _stringdefs
    name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start,
                                         _stringdefs.xid_continue))

float_re = re.compile(r'(?<!\.)\d+\.\d+')
newline_re = re.compile(r'(\r\n|\r|\n)')

# intern the tokens and keep references to them
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
TOKEN_MUL = intern('mul')
TOKEN_NE = intern('ne')
TOKEN_PIPE = intern('pipe')
TOKEN_POW = intern('pow')
TOKEN_RBRACE = intern('rbrace')
TOKEN_RBRACKET = intern('rbracket')
TOKEN_RPAREN = intern('rparen')
TOKEN_SEMICOLON = intern('semicolon')
TOKEN_SUB = intern('sub')
TOKEN_TILDE = intern('tilde')
TOKEN_WHITESPACE = intern('whitespace')
TOKEN_FLOAT = intern('float')
TOKEN_INTEGER = intern('integer')
TOKEN_NAME = intern('name')
TOKEN_STRING = intern('string')
TOKEN_OPERATOR = intern('operator')
TOKEN_BLOCK_BEGIN = intern('block_begin')
TOKEN_BLOCK_END = intern('block_end')
TOKEN_VARIABLE_BEGIN = intern('variable_begin')
TOKEN_VARIABLE_END = intern('variable_end')
TOKEN_RAW_BEGIN = intern('raw_begin')
TOKEN_RAW_END = intern('raw_end')
TOKEN_COMMENT_BEGIN = intern('comment_begin')
TOKEN_COMMENT_END = intern('comment_end')
TOKEN_COMMENT = intern('comment')
TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
TOKEN_LINESTATEMENT_END = intern('linestatement_end')
TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
TOKEN_LINECOMMENT_END = intern('linecomment_end')
TOKEN_LINECOMMENT = intern('linecomment')
TOKEN_DATA = intern('data')
TOKEN_INITIAL = intern('initial')
TOKEN_EOF = intern('eof')

# bind operators to token types
operators = {
    '+': TOKEN_ADD,
    '-': TOKEN_SUB,
    '/': TOKEN_DIV,
    '//': TOKEN_FLOORDIV,
    '*': TOKEN_MUL,
    '%': TOKEN_MOD,
    '**': TOKEN_POW,
    '~': TOKEN_TILDE,
    '[': TOKEN_LBRACKET,
    ']': TOKEN_RBRACKET,
    '(': TOKEN_LPAREN,
    ')': TOKEN_RPAREN,
    '{': TOKEN_LBRACE,
    '}': TOKEN_RBRACE,
    '==': TOKEN_EQ,
    '!=': TOKEN_NE,
    '>': TOKEN_GT,
    '>=': TOKEN_GTEQ,
    '<': TOKEN_LT,
    '<=': TOKEN_LTEQ,
    '=': TOKEN_ASSIGN,
    '.': TOKEN_DOT,
    ':': TOKEN_COLON,
    '|': TOKEN_PIPE,
    ',': TOKEN_COMMA,
    ';': TOKEN_SEMICOLON
}

reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
assert len(operators) == len(reverse_operators), 'operators dropped'
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
                         sorted(operators, key=lambda x: -len(x))))

ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
                            TOKEN_COMMENT_END, TOKEN_WHITESPACE,
                            TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
                            TOKEN_LINECOMMENT])
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
                             TOKEN_COMMENT, TOKEN_LINECOMMENT])


def _describe_token_type(token_type):
    if token_type in reverse_operators:
        return reverse_operators[token_type]
    return {
        TOKEN_COMMENT_BEGIN: 'begin of comment',
        TOKEN_COMMENT_END: 'end of comment',
        TOKEN_COMMENT: 'comment',
        TOKEN_LINECOMMENT: 'comment',
        TOKEN_BLOCK_BEGIN: 'begin of statement block',
        TOKEN_BLOCK_END: 'end of statement block',
        TOKEN_VARIABLE_BEGIN: 'begin of print statement',
        TOKEN_VARIABLE_END: 'end of print statement',
        TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
        TOKEN_LINESTATEMENT_END: 'end of line statement',
        TOKEN_DATA: 'template data / text',
        TOKEN_EOF: 'end of template'
    }.get(token_type, token_type)


def describe_token(token):
    """Returns a description of the token."""
    if token.type == 'name':
        return token.value
    return _describe_token_type(token.type)


def describe_token_expr(expr):
    """Like `describe_token` but for token expressions."""
    if ':' in expr:
        type, value = expr.split(':', 1)
        if type == 'name':
            return value
    else:
        type = expr
    return _describe_token_type(type)
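
# Example (illustrative): ``describe_token_expr('name:endfor')`` returns
# ``'endfor'`` while ``describe_token_expr('block_end')`` returns
# ``'end of statement block'``.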


def count_newlines(value):
    """Count the number of newline characters in the string. This is
    useful for extensions that filter a stream.
    """
    return len(newline_re.findall(value))
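
# Example (illustrative): ``count_newlines(u'a\r\nb\rc\nd')`` is 3; each of
# \r\n, \r and \n counts as a single newline.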


def compile_rules(environment):
    """Compiles all the rules from the environment into a list of rules."""
    e = re.escape
    rules = [
        (len(environment.comment_start_string), 'comment',
         e(environment.comment_start_string)),
        (len(environment.block_start_string), 'block',
         e(environment.block_start_string)),
        (len(environment.variable_start_string), 'variable',
         e(environment.variable_start_string))
    ]

    if environment.line_statement_prefix is not None:
        rules.append((len(environment.line_statement_prefix), 'linestatement',
                      r'^[ \t\v]*' + e(environment.line_statement_prefix)))
    if environment.line_comment_prefix is not None:
        rules.append((len(environment.line_comment_prefix), 'linecomment',
                      r'(?:^|(?<=\S))[^\S\r\n]*' +
                      e(environment.line_comment_prefix)))

    return [x[1:] for x in sorted(rules, reverse=True)]
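
# Example (illustrative, default delimiters): ``compile_rules`` returns the
# root tag openers sorted for longest-match precedence, roughly
#     [('variable', '\\{\\{'), ('comment', '\\{\\#'), ('block', '\\{\\%')]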


class Failure(object):
    """Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """

    def __init__(self, message, cls=TemplateSyntaxError):
        self.message = message
        self.error_class = cls

    def __call__(self, lineno, filename):
        raise self.error_class(self.message, lineno, filename)


class Token(tuple):
    """Token class."""
    __slots__ = ()
    lineno, type, value = (property(itemgetter(x)) for x in range(3))

    def __new__(cls, lineno, type, value):
        return tuple.__new__(cls, (lineno, intern(str(type)), value))

    def __str__(self):
        if self.type in reverse_operators:
            return reverse_operators[self.type]
        elif self.type == 'name':
            return self.value
        return self.type

    def test(self, expr):
        """Test a token against a token expression. This can either be a
        token type or ``'token_type:token_value'``. This can only test
        against string values and types.
        """
        # here we do a regular string equality check as test_any is usually
        # passed an iterable of not interned strings.
        if self.type == expr:
            return True
        elif ':' in expr:
            return expr.split(':', 1) == [self.type, self.value]
        return False

    def test_any(self, *iterable):
        """Test against multiple token expressions."""
        for expr in iterable:
            if self.test(expr):
                return True
        return False

    def __repr__(self):
        return 'Token(%r, %r, %r)' % (
            self.lineno,
            self.type,
            self.value
        )
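
# Example (illustrative): ``Token(1, 'name', 'foo').test('name:foo')`` is
# True, and ``str(Token(1, TOKEN_ADD, '+'))`` gives ``'+'`` via
# ``reverse_operators``.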


@implements_iterator
class TokenStreamIterator(object):
    """The iterator for tokenstreams. Iterate over the stream
    until the eof token is reached.
    """

    def __init__(self, stream):
        self.stream = stream

    def __iter__(self):
        return self

    def __next__(self):
        token = self.stream.current
        if token.type is TOKEN_EOF:
            self.stream.close()
            raise StopIteration()
        next(self.stream)
        return token


@implements_iterator
class TokenStream(object):
    """A token stream is an iterable that yields :class:`Token`\s. The
    parser however does not iterate over it but calls :meth:`next` to go
    one token ahead. The current active token is stored as :attr:`current`.
    """

    def __init__(self, generator, name, filename):
        self._iter = iter(generator)
        self._pushed = deque()
        self.name = name
        self.filename = filename
        self.closed = False
        self.current = Token(1, TOKEN_INITIAL, '')
        next(self)

    def __iter__(self):
        return TokenStreamIterator(self)

    def __bool__(self):
        return bool(self._pushed) or self.current.type is not TOKEN_EOF
    __nonzero__ = __bool__  # py2

    eos = property(lambda x: not x, doc="Are we at the end of the stream?")

    def push(self, token):
        """Push a token back to the stream."""
        self._pushed.append(token)

    def look(self):
        """Look at the next token."""
        old_token = next(self)
        result = self.current
        self.push(result)
        self.current = old_token
        return result

    def skip(self, n=1):
        """Go n tokens ahead."""
        for x in range(n):
            next(self)

    def next_if(self, expr):
        """Perform the token test and return the token if it matched.
        Otherwise the return value is `None`.
        """
        if self.current.test(expr):
            return next(self)

    def skip_if(self, expr):
        """Like :meth:`next_if` but only returns `True` or `False`."""
        return self.next_if(expr) is not None

    def __next__(self):
        """Go one token ahead and return the old one."""
        rv = self.current
        if self._pushed:
            self.current = self._pushed.popleft()
        elif self.current.type is not TOKEN_EOF:
            try:
                self.current = next(self._iter)
            except StopIteration:
                self.close()
        return rv

    def close(self):
        """Close the stream."""
        self.current = Token(self.current.lineno, TOKEN_EOF, '')
        self._iter = None
        self.closed = True

    def expect(self, expr):
        """Expect a given token type and return it. This accepts the same
        argument as :meth:`jinja2.lexer.Token.test`.
        """
        if not self.current.test(expr):
            expr = describe_token_expr(expr)
            if self.current.type is TOKEN_EOF:
                raise TemplateSyntaxError('unexpected end of template, '
                                          'expected %r.' % expr,
                                          self.current.lineno,
                                          self.name, self.filename)
            raise TemplateSyntaxError("expected token %r, got %r" %
                                      (expr, describe_token(self.current)),
                                      self.current.lineno,
                                      self.name, self.filename)
        try:
            return self.current
        finally:
            next(self)
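
    # Usage sketch (illustrative): the stream supports one-token lookahead,
    # e.g. with a hypothetical one-token stream::
    #
    #     stream = TokenStream(iter([Token(1, 'name', 'foo')]), None, None)
    #     stream.look()           # peek at the following token
    #     stream.skip_if('name')  # True, consumes the current token
    #     stream.eos              # True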


def get_lexer(environment):
    """Return a lexer which is probably cached."""
    key = (environment.block_start_string,
           environment.block_end_string,
           environment.variable_start_string,
           environment.variable_end_string,
           environment.comment_start_string,
           environment.comment_end_string,
           environment.line_statement_prefix,
           environment.line_comment_prefix,
           environment.trim_blocks,
           environment.lstrip_blocks,
           environment.newline_sequence,
           environment.keep_trailing_newline)
    lexer = _lexer_cache.get(key)
    if lexer is None:
        lexer = Lexer(environment)
        _lexer_cache[key] = lexer
    return lexer
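
# Caching sketch (illustrative): environments configured identically share
# one lexer, i.e. ``get_lexer(env_a) is get_lexer(env_b)`` holds for two
# hypothetical environments ``env_a`` and ``env_b`` with equal settings.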


class Lexer(object):
    """Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.

    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """

    def __init__(self, environment):
        # shortcuts
        c = lambda x: re.compile(x, re.M | re.S)
        e = re.escape

        # lexing rules for tags
        tag_rules = [
            (whitespace_re, TOKEN_WHITESPACE, None),
            (float_re, TOKEN_FLOAT, None),
            (integer_re, TOKEN_INTEGER, None),
            (name_re, TOKEN_NAME, None),
            (string_re, TOKEN_STRING, None),
            (operator_re, TOKEN_OPERATOR, None)
        ]

        # assemble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for block and
        # <%= for variables. (if someone wants asp like syntax)
        # variables are just part of the rules if variable processing
        # is required.
        root_tag_rules = compile_rules(environment)

        # block suffix if trimming is enabled
        block_suffix_re = environment.trim_blocks and '\\n?' or ''

        # strip leading spaces if lstrip_blocks is enabled
        prefix_re = {}
        if environment.lstrip_blocks:
            # use '{%+' to manually disable lstrip_blocks behavior
            no_lstrip_re = e('+')
            # detect overlap between block and variable or comment strings
            block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
            # make sure we don't mistake a block for a variable or a comment
            m = block_diff.match(environment.comment_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
            m = block_diff.match(environment.variable_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''

            # detect overlap between comment and variable strings
            comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
            m = comment_diff.match(environment.variable_start_string)
            no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''

            lstrip_re = r'^[ \t]*'
            block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
                lstrip_re,
                e(environment.block_start_string),
                no_lstrip_re,
                e(environment.block_start_string),
            )
            comment_prefix_re = r'%s%s%s|%s\+?' % (
                lstrip_re,
                e(environment.comment_start_string),
                no_variable_re,
                e(environment.comment_start_string),
            )
            prefix_re['block'] = block_prefix_re
            prefix_re['comment'] = comment_prefix_re
        else:
            block_prefix_re = '%s' % e(environment.block_start_string)

        self.newline_sequence = environment.newline_sequence
        self.keep_trailing_newline = environment.keep_trailing_newline

        # global lexing rules
        self.rules = {
            'root': [
                # directives
                (c('(.*?)(?:%s)' % '|'.join(
                    [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
                        e(environment.block_start_string),
                        block_prefix_re,
                        e(environment.block_end_string),
                        e(environment.block_end_string)
                    )] + [
                        r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n, r))
                        for n, r in root_tag_rules
                    ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
                # data
                (c('.+'), TOKEN_DATA, None)
            ],
            # comments
            TOKEN_COMMENT_BEGIN: [
                (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
                    e(environment.comment_end_string),
                    e(environment.comment_end_string),
                    block_suffix_re
                )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
                (c('(.)'), (Failure('Missing end of comment tag'),), None)
            ],
            # blocks
            TOKEN_BLOCK_BEGIN: [
                (c(r'(?:\-%s\s*|%s)%s' % (
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), TOKEN_BLOCK_END, '#pop'),
            ] + tag_rules,
            # variables
            TOKEN_VARIABLE_BEGIN: [
                (c(r'\-%s\s*|%s' % (
                    e(environment.variable_end_string),
                    e(environment.variable_end_string)
                )), TOKEN_VARIABLE_END, '#pop')
            ] + tag_rules,
            # raw block
            TOKEN_RAW_BEGIN: [
                (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
                    e(environment.block_start_string),
                    block_prefix_re,
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
                (c('(.)'), (Failure('Missing end of raw directive'),), None)
            ],
            # line statements
            TOKEN_LINESTATEMENT_BEGIN: [
                (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
            ] + tag_rules,
            # line comments
            TOKEN_LINECOMMENT_BEGIN: [
                (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
                 TOKEN_LINECOMMENT_END), '#pop')
            ]
        }
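        # Shape note (illustrative): every state above maps to a list of
        # (compiled regex, token type(s), new state) triples; '#pop' leaves
        # the current state, '#bygroup' resolves the token or state from
        # the named group that matched.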

    def _normalize_newlines(self, value):
        """Called for strings and template data to normalize it to unicode."""
        return newline_re.sub(self.newline_sequence, value)

    def tokenize(self, source, name=None, filename=None, state=None):
        """Calls tokeniter + wrap and wraps the result in a token stream.
        """
        stream = self.tokeniter(source, name, filename, state)
        return TokenStream(self.wrap(stream, name, filename), name, filename)

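    # Usage sketch (illustrative): ``lexer.tokenize(u'{{ foo }}')`` yields a
    # TokenStream of Token instances; the heavy lifting happens in ``wrap``
    # and ``tokeniter`` below.
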
    def wrap(self, stream, name=None, filename=None):
        """This is called with the stream as returned by `tokenize` and wraps
        every token in a :class:`Token` and converts the value.
        """
        for lineno, token, value in stream:
            if token in ignored_tokens:
                continue
            elif token == 'linestatement_begin':
                token = 'block_begin'
            elif token == 'linestatement_end':
                token = 'block_end'
            # we are not interested in those tokens in the parser
            elif token in ('raw_begin', 'raw_end'):
                continue
            elif token == 'data':
                value = self._normalize_newlines(value)
            elif token == 'keyword':
                token = value
            elif token == 'name':
                value = str(value)
            elif token == 'string':
                # try to unescape string
                try:
                    value = self._normalize_newlines(value[1:-1]) \
                        .encode('ascii', 'backslashreplace') \
                        .decode('unicode-escape')
                except Exception as e:
                    msg = str(e).split(':')[-1].strip()
                    raise TemplateSyntaxError(msg, lineno, name, filename)
                # if we can express it as bytestring (ascii only)
                # we do that for support of semi broken APIs
                # as datetime.datetime.strftime; on Python 3 this
                # downgrade is skipped entirely
                if PY2:
                    try:
                        value = value.encode('ascii')
                    except UnicodeError:
                        pass
            elif token == 'integer':
                value = int(value)
            elif token == 'float':
                value = float(value)
            elif token == 'operator':
                token = operators[value]
            yield Token(lineno, token, value)

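    # Conversion sketch (illustrative): ``wrap`` turns a raw triple such as
    # (1, 'integer', u'42') into Token(1, 'integer', 42), folds line
    # statements into regular block tokens and drops comments and
    # whitespace.
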
    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator. Use this method if you just want to tokenize a template.
        """
        source = text_type(source)
        lines = source.splitlines()
        if self.keep_trailing_newline and source:
            for newline in ('\r\n', '\r', '\n'):
                if source.endswith(newline):
                    lines.append('')
                    break
        source = '\n'.join(lines)
        pos = 0
        lineno = 1
        stack = ['root']
        if state is not None and state != 'root':
            assert state in ('variable', 'block'), 'invalid state'
            stack.append(state + '_begin')
        else:
            state = 'root'
        statetokens = self.rules[stack[-1]]
        source_length = len(source)

        balancing_stack = []

        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if m is None:
                    continue

                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and \
                   tokens in ('variable_end', 'block_end',
                              'linestatement_end'):
                    continue

                # tuples support more options
                if isinstance(tokens, tuple):
                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == '#bygroup':
                            for key, value in iteritems(m.groupdict()):
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count('\n')
                                    break
                            else:
                                raise RuntimeError('%r wanted to resolve '
                                                   'the token dynamically'
                                                   ' but no group matched'
                                                   % regex)
                        # normal group
                        else:
                            data = m.group(idx + 1)
                            if data or token not in ignore_if_empty:
                                yield lineno, token, data
                            lineno += data.count('\n')

                # strings as tokens are just yielded as-is
                else:
                    data = m.group()
                    # update brace/parentheses balance
                    if tokens == 'operator':
                        if data == '{':
                            balancing_stack.append('}')
                        elif data == '(':
                            balancing_stack.append(')')
                        elif data == '[':
                            balancing_stack.append(']')
                        elif data in ('}', ')', ']'):
                            if not balancing_stack:
                                raise TemplateSyntaxError('unexpected \'%s\'' %
                                                          data, lineno, name,
                                                          filename)
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError('unexpected \'%s\', '
                                                          'expected \'%s\'' %
                                                          (data, expected_op),
                                                          lineno, name,
                                                          filename)
                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data
                    lineno += data.count('\n')

                # fetch new position into new variable so that we can check
                # if there is an internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()

                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == '#pop':
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == '#bygroup':
                        for key, value in iteritems(m.groupdict()):
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError('%r wanted to resolve the '
                                               'new state dynamically but'
                                               ' no group matched' %
                                               regex)
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError('%r yielded empty string without '
                                       'stack change' % regex)
                # publish the new position and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError('unexpected char %r at %d' %
                                          (source[pos], pos), lineno,
                                          name, filename)
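

# Driving sketch (illustrative, assumes a default Environment):
#
#     from jinja2 import Environment
#     env = Environment()
#     for lineno, token, value in env.lexer.tokeniter(u'{{ 1 }}', 'demo'):
#         print(lineno, token, value)
#
# prints roughly (1, 'variable_begin', u'{{'), (1, 'whitespace', u' '),
# (1, 'integer', u'1'), (1, 'whitespace', u' '), (1, 'variable_end', u'}}').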