Permalink
Browse files

fixed more unittests

--HG--
branch : trunk
  • Loading branch information...
1 parent 2b60fe5 commit 4f7d2d56ab996050c9094f9426969028db0c8aa6 @mitsuhiko mitsuhiko committed Apr 22, 2008
Showing with 228 additions and 189 deletions.
  1. +1 −1 jinja2/__init__.py
  2. +3 −1 jinja2/defaults.py
  3. +3 −2 jinja2/ext.py
  4. +17 −7 jinja2/i18n.py
  5. +6 −3 jinja2/lexer.py
  6. +10 −3 jinja2/nodes.py
  7. +6 −4 jinja2/optimizer.py
  8. +4 −4 jinja2/parser.py
  9. +8 −1 jinja2/runtime.py
  10. +74 −2 jinja2/utils.py
  11. +9 −0 jinja2/visitor.py
  12. +36 −44 tests/test_i18n.py
  13. +11 −29 tests/test_parser.py
  14. +29 −26 tests/test_security.py
  15. +11 −62 tests/test_various.py
View
@@ -61,4 +61,4 @@
DictLoader
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined
from jinja2.filters import environmentfilter, contextfilter
-from jinja2.utils import Markup, escape
+from jinja2.utils import Markup, escape, contextfunction
View
@@ -10,8 +10,10 @@
"""
from jinja2.filters import FILTERS as DEFAULT_FILTERS
from jinja2.tests import TESTS as DEFAULT_TESTS
+from jinja2.utils import generate_lorem_ipsum
DEFAULT_NAMESPACE = {
- 'range': xrange
+ 'range': xrange,
+ 'lipsum': generate_lorem_ipsum
}
View
@@ -40,10 +40,11 @@ class CacheExtension(Extension):
def parse(self, parser):
lineno = parser.stream.next().lineno
args = [parser.parse_expression()]
- if self.stream.current.type is 'comma':
+ if parser.stream.current.type is 'comma':
+ parser.stream.next()
args.append(parser.parse_expression())
body = parser.parse_statements(('name:endcache',), drop_needle=True)
return nodes.CallBlock(
- nodes.Call(nodes.Name('cache_support'), args, [], None, None),
+ nodes.Call(nodes.Name('cache_support', 'load'), args, [], None, None),
[], [], body
)
View
@@ -34,7 +34,7 @@ def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS):
* ``message`` is the string itself (a ``unicode`` object, or a tuple
of ``unicode`` objects for functions with multiple string arguments).
"""
- for call in node.find_all(nodes.Call):
+ for node in node.find_all(nodes.Call):
if not isinstance(node.node, nodes.Name) or \
node.node.name not in gettext_functions:
continue
@@ -43,7 +43,7 @@ def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS):
for arg in node.args:
if isinstance(arg, nodes.Const) and \
isinstance(arg.value, basestring):
- strings.append(arg)
+ strings.append(arg.value)
else:
strings.append(None)
@@ -67,6 +67,7 @@ def babel_extract(fileobj, keywords, comment_tags, options):
(comments will be empty currently)
"""
encoding = options.get('encoding', 'utf-8')
+ extensions = [x.strip() for x in options.get('extensions', '').split(',')]
environment = Environment(
options.get('block_start_string', '{%'),
options.get('block_end_string', '%}'),
@@ -76,9 +77,18 @@ def babel_extract(fileobj, keywords, comment_tags, options):
options.get('comment_end_string', '#}'),
options.get('line_statement_prefix') or None,
options.get('trim_blocks', '').lower() in ('1', 'on', 'yes', 'true'),
- extensions=[x.strip() for x in options.get('extensions', '')
- .split(',')] + [TransExtension]
+ extensions=[x for x in extensions if x]
)
+
+ # add the i18n extension only if it's not yet in the list. Some people
+ # might use a script to sync the babel ini with the Jinja configuration
+ # so we want to avoid having the trans extension twice in the list.
+ for extension in environment.extensions:
+ if isinstance(extension, TransExtension):
+ break
+ else:
+ environment.extensions.append(TransExtension(environment))
+
node = environment.parse(fileobj.read().decode(encoding))
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, []
@@ -163,10 +173,10 @@ def parse(self, parser):
plural = plural.replace('%%', '%')
if not have_plural:
- if plural_expr is None:
- raise TemplateAssertionError('pluralize without variables',
- lineno, parser.filename)
plural_expr = None
+ elif plural_expr is None:
+ raise TemplateAssertionError('pluralize without variables',
+ lineno, parser.filename)
if variables:
variables = nodes.Dict([nodes.Pair(nodes.Const(x, lineno=lineno), y)
View
@@ -38,7 +38,7 @@
# set of used keywords
keywords = set(['and', 'block', 'elif', 'else', 'endblock', 'print',
'endfilter', 'endfor', 'endif', 'endmacro', 'endraw',
- 'extends', 'filter', 'for', 'if', 'in', 'include'
+ 'extends', 'filter', 'for', 'if', 'in', 'include',
'is', 'macro', 'not', 'or', 'raw', 'call', 'endcall'])
# bind operators to token types
@@ -246,7 +246,7 @@ def __init__(self, environment):
('block', environment.block_start_string),
('variable', environment.variable_start_string)
]
- root_tag_rules.sort(key=lambda x: len(x[1]))
+ root_tag_rules.sort(key=lambda x: -len(x[1]))
# now escape the rules. This is done here so that the escape
# signs don't count for the lengths of the tags.
@@ -320,7 +320,7 @@ def __init__(self, environment):
def tokenize(self, source, filename=None):
"""Works like `tokeniter` but returns a tokenstream of tokens and not
- a generator or token tuples. Additionally all token values are already
+ a generator or token tuples. Additionally all token values are already
converted into types and postprocessed. For example keywords are
already keyword tokens, not named tokens, comments are removed,
integers and floats converted, strings unescaped etc.
@@ -334,6 +334,9 @@ def generate():
token = 'block_begin'
elif token == 'linestatement_end':
token = 'block_end'
+ # we are not interested in those tokens in the parser
+ elif token in ('raw_begin', 'raw_end'):
+ continue
elif token == 'data':
try:
value = str(value)
View
@@ -9,10 +9,11 @@
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
- :copyright: 2007 by Armin Ronacher.
+ :copyright: 2008 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import operator
+from types import FunctionType
from itertools import chain, izip
from collections import deque
from copy import copy
@@ -42,7 +43,7 @@
'lt': operator.lt,
'lteq': operator.le,
'in': operator.contains,
- 'notin': lambda a, b: not operator.contains(a, b)
+ 'notin': lambda a, b: b not in a
}
@@ -449,6 +450,12 @@ class Call(Expr):
def as_const(self):
obj = self.node.as_const()
+
+ # don't evaluate context functions
+ if type(obj) is FunctionType and \
+ getattr(obj, 'contextfunction', False):
+ raise Impossible()
+
args = [x.as_const() for x in self.args]
kwargs = dict(x.as_const() for x in self.kwargs)
if self.dyn_args is not None:
@@ -480,7 +487,7 @@ def as_const(self):
raise Impossible()
def can_assign(self):
- return True
+ return False
class Slice(Expr):
View
@@ -32,6 +32,8 @@
# - multiple Output() nodes should be concatenated into one node.
# for example the i18n system could output such nodes:
# "foo{% trans %}bar{% endtrans %}blah"
+# - when unrolling loops local sets become global sets :-/
+# see also failing test case `test_localset` in test_various
def optimize(node, environment, context_hint=None):
@@ -183,12 +185,12 @@ def assign(target, value):
for item, loop in LoopContext(iterable, True):
context['loop'] = loop.make_static()
assign(node.target, item)
- result.extend(self.visit(n.copy(), context)
- for n in node.body)
+ for n in node.body:
+ result.extend(self.visit_list(n.copy(), context))
iterated = True
if not iterated and node.else_:
- result.extend(self.visit(n.copy(), context)
- for n in node.else_)
+ for n in node.else_:
+ result.extend(self.visit_list(n.copy(), context))
except nodes.Impossible:
return node
finally:
View
@@ -64,8 +64,8 @@ def parse_assign(self, target):
lineno = self.stream.expect('assign').lineno
if not target.can_assign():
raise TemplateSyntaxError("can't assign to '%s'" %
- target, target.lineno,
- self.filename)
+ target.__class__.__name__.lower(),
+ target.lineno, self.filename)
expr = self.parse_tuple()
target.set_ctx('store')
return nodes.Assign(target, expr, lineno=lineno)
@@ -94,8 +94,8 @@ def parse_for(self):
target = self.parse_tuple(simplified=True)
if not target.can_assign():
raise TemplateSyntaxError("can't assign to '%s'" %
- target, target.lineno,
- self.filename)
+ target.__class__.__name__.lower(),
+ target.lineno, self.filename)
target.set_ctx('store')
self.stream.expect('in')
iter = self.parse_tuple(no_condexpr=True)
View
@@ -12,7 +12,8 @@
from collections import defaultdict
except ImportError:
defaultdict = None
-from jinja2.utils import Markup
+from types import FunctionType
+from jinja2.utils import Markup, partial
from jinja2.exceptions import UndefinedError
@@ -34,6 +35,12 @@ def __init__(self, environment, globals, name, blocks, standalone):
self.name = name
self.blocks = dict((k, [v]) for k, v in blocks.iteritems())
+ # give all context functions the context as first argument
+ for key, value in self.iteritems():
+ if type(value) is FunctionType and \
+ getattr(value, 'contextfunction', False):
+ dict.__setitem__(self, key, partial(value, self))
+
# if the template is in standalone mode we don't copy the blocks over.
# this is used for includes for example but otherwise, if the globals
# are a template context, this template is participating in a template
View
@@ -12,7 +12,6 @@
import string
from collections import deque
from copy import deepcopy
-from functools import update_wrapper
from itertools import imap
@@ -26,6 +25,14 @@
_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
+def contextfunction(f):
+ """Mark a callable as context callable. A context callable is passed
+ the active context as first argument.
+ """
+ f.contextfunction = True
+ return f
+
+
def import_string(import_name, silent=False):
+    """Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
@@ -108,6 +115,55 @@ def urlize(text, trim_url_limit=None, nofollow=False):
return u''.join(words)
+def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
+    """Generate some lorem ipsum for the template."""
+ from jinja2.constants import LOREM_IPSUM_WORDS
+ from random import choice, random, randrange
+ words = LOREM_IPSUM_WORDS.split()
+ result = []
+
+ for _ in xrange(n):
+ next_capitalized = True
+ last_comma = last_fullstop = 0
+ word = None
+ last = None
+ p = []
+
+        # each paragraph consists of 20 to 100 words.
+ for idx, _ in enumerate(xrange(randrange(min, max))):
+ while True:
+ word = choice(words)
+ if word != last:
+ last = word
+ break
+ if next_capitalized:
+ word = word.capitalize()
+ next_capitalized = False
+ # add commas
+ if idx - randrange(3, 8) > last_comma:
+ last_comma = idx
+ last_fullstop += 2
+ word += ','
+ # add end of sentences
+ if idx - randrange(10, 20) > last_fullstop:
+ last_comma = last_fullstop = idx
+ word += '.'
+ next_capitalized = True
+ p.append(word)
+
+ # ensure that the paragraph ends with a dot.
+ p = u' '.join(p)
+ if p.endswith(','):
+ p = p[:-1] + '.'
+ elif not p.endswith('.'):
+ p += '.'
+ result.append(p)
+
+ if not html:
+ return u'\n\n'.join(result)
+ return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
+
+
class Markup(unicode):
"""Marks a string as being safe for inclusion in HTML/XML output without
needing to be escaped. This implements the `__html__` interface a couple
@@ -178,7 +234,9 @@ def func(self, *args, **kwargs):
if hasattr(arg, '__html__') or isinstance(arg, basestring):
kwargs[name] = escape(arg)
return self.__class__(orig(self, *args, **kwargs))
- return update_wrapper(func, orig, ('__name__', '__doc__'))
+ func.__name__ = orig.__name__
+ func.__doc__ = orig.__doc__
+ return func
for method in '__getitem__', '__getslice__', 'capitalize', \
'title', 'lower', 'upper', 'replace', 'ljust', \
'rjust', 'lstrip', 'rstrip', 'partition', 'center', \
@@ -339,3 +397,17 @@ def soft_unicode(s):
if not isinstance(s, unicode):
s = unicode(s)
return s
+
+
+# partials
+try:
+ from functools import partial
+except ImportError:
+ class partial(object):
+ def __init__(self, _func, *args, **kwargs):
+            self._func = _func
+ self._args = args
+ self._kwargs = kwargs
+ def __call__(self, *args, **kwargs):
+ kwargs.update(self._kwargs)
+ return self._func(*(self._args + args), **kwargs)
View
@@ -77,3 +77,12 @@ def generic_visit(self, node, *args, **kwargs):
else:
setattr(node, field, new_node)
return node
+
+ def visit_list(self, node, *args, **kwargs):
+ """As transformers may return lists in some places this method
+ can be used to enforce a list as return value.
+ """
+ rv = self.visit(node, *args, **kwargs)
+ if not isinstance(rv, list):
+ rv = [rv]
+ return rv
Oops, something went wrong.

0 comments on commit 4f7d2d5

Please sign in to comment.