Skip to content
Browse files

Add Cheetah 2.44 source code

  • Loading branch information...
1 parent 4726290 commit e9073c33273705bd7e91f41437a2c790f303c112 @bcui6611 committed
Showing with 15,887 additions and 0 deletions.
  1. +136 −0 cheetah/CacheRegion.py
  2. +106 −0 cheetah/CacheStore.py
  3. +98 −0 cheetah/DirectiveAnalyzer.py
  4. +16 −0 cheetah/Django.py
  5. +108 −0 cheetah/DummyTransaction.py
  6. +62 −0 cheetah/ErrorCatchers.py
  7. +357 −0 cheetah/FileUtils.py
  8. +212 −0 cheetah/Filters.py
  9. +129 −0 cheetah/ImportHooks.py
  10. +541 −0 cheetah/ImportManager.py
  11. +67 −0 cheetah/Macros/I18n.py
  12. +1 −0 cheetah/Macros/__init__.py
  13. +2,661 −0 cheetah/Parser.py
  14. +48 −0 cheetah/Servlet.py
  15. +284 −0 cheetah/SettingsManager.py
  16. +267 −0 cheetah/SourceReader.py
  17. +1,941 −0 cheetah/Template.py
  18. +107 −0 cheetah/TemplateCmdLineIface.py
  19. +272 −0 cheetah/Templates/SkeletonPage.py
  20. +44 −0 cheetah/Templates/SkeletonPage.tmpl
  21. +215 −0 cheetah/Templates/_SkeletonPage.py
  22. +1 −0 cheetah/Templates/__init__.py
  23. +29 −0 cheetah/Tests/Analyzer.py
  24. +579 −0 cheetah/Tests/CheetahWrapper.py
  25. +39 −0 cheetah/Tests/Cheps.py
  26. +70 −0 cheetah/Tests/Filters.py
  27. +20 −0 cheetah/Tests/Misc.py
  28. +548 −0 cheetah/Tests/NameMapper.py
  29. +49 −0 cheetah/Tests/Parser.py
  30. +243 −0 cheetah/Tests/Performance.py
  31. +247 −0 cheetah/Tests/Regressions.py
  32. +3,253 −0 cheetah/Tests/SyntaxAndOutput.py
  33. +363 −0 cheetah/Tests/Template.py
  34. +53 −0 cheetah/Tests/Test.py
  35. +237 −0 cheetah/Tests/Unicode.py
  36. +1 −0 cheetah/Tests/__init__.py
  37. +381 −0 cheetah/Tests/xmlrunner.py
  38. +77 −0 cheetah/Tools/CGITemplate.py
  39. +464 −0 cheetah/Tools/MondoReport.py
  40. +391 −0 cheetah/Tools/MondoReportDoc.txt
  41. +28 −0 cheetah/Tools/RecursiveNull.py
  42. +166 −0 cheetah/Tools/SiteHierarchy.py
  43. +8 −0 cheetah/Tools/__init__.py
  44. +5 −0 cheetah/Tools/turbocheetah/__init__.py
  45. +110 −0 cheetah/Tools/turbocheetah/cheetahsupport.py
  46. +1 −0 cheetah/Tools/turbocheetah/tests/__init__.py
  47. +66 −0 cheetah/Tools/turbocheetah/tests/test_template.py
  48. +9 −0 cheetah/Unspecified.py
  49. +123 −0 cheetah/Utils/Indenter.py
  50. +67 −0 cheetah/Utils/Misc.py
  51. +102 −0 cheetah/Utils/WebInputMixin.py
  52. +1 −0 cheetah/Utils/__init__.py
  53. +14 −0 cheetah/Utils/htmlDecode.py
  54. +21 −0 cheetah/Utils/htmlEncode.py
  55. +304 −0 cheetah/Utils/statprof.py
  56. +58 −0 cheetah/Version.py
  57. +20 −0 cheetah/__init__.py
  58. +47 −0 cheetah/c/Cheetah.h
  59. +20 −0 cheetah/convertTmplPathToModuleName.py
View
136 cheetah/CacheRegion.py
@@ -0,0 +1,136 @@
+# $Id: CacheRegion.py,v 1.3 2006/01/28 04:19:30 tavis_rudd Exp $
+'''
+Cache holder classes for Cheetah:
+
+Cache regions are defined using the #cache Cheetah directive. Each
+cache region can be viewed as a dictionary (keyed by cacheRegionID)
+handling at least one cache item (the default one). It's possible to add
+cacheItems in a region by using the `varyBy` #cache directive parameter as
+in the following example::
+ #def getArticle
+ this is the article content.
+ #end def
+
+ #cache varyBy=$getArticleID()
+ $getArticle($getArticleID())
+ #end cache
+
+The code above will generate a CacheRegion and add new cacheItem for each value
+of $getArticleID().
+'''
+
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+
+import time
+import Cheetah.CacheStore
+
class CacheItem(object):
    '''
    Container describing one cached value.

    Tracks:
      - cacheItemID (string): the key used in the underlying cache store
      - refreshTime (timestamp or None): when the cache was last refreshed
      - expiry time (timestamp; 0 means "never expires")

    The payload itself lives in the cacheStore, not on this object.
    '''

    def __init__(self, cacheItemID, cacheStore):
        self._cacheItemID = cacheItemID
        self._cacheStore = cacheStore
        self._refreshTime = None
        self._expiryTime = 0

    def hasExpired(self):
        # An expiry time of 0 (falsy) means the item never expires.
        if not self._expiryTime:
            return False
        return time.time() > self._expiryTime

    def setExpiryTime(self, time):
        # NOTE: the parameter name mirrors the memcached-style API and
        # shadows the ``time`` module within this method only.
        self._expiryTime = time

    def getExpiryTime(self):
        return self._expiryTime

    def setData(self, data):
        # Record the refresh moment, then hand the payload to the store.
        self._refreshTime = time.time()
        self._cacheStore.set(self._cacheItemID, data, self._expiryTime)

    def getRefreshTime(self):
        return self._refreshTime

    def getData(self):
        # Reading before the first setData() is a programming error.
        assert self._refreshTime
        return self._cacheStore.get(self._cacheItemID)

    def renderOutput(self):
        """Can be overridden to implement edge-caching"""
        return self.getData() or ""

    def clear(self):
        self._cacheStore.delete(self._cacheItemID)
        self._refreshTime = None
+
+class _CacheDataStoreWrapper(object):
+ def __init__(self, dataStore, keyPrefix):
+ self._dataStore = dataStore
+ self._keyPrefix = keyPrefix
+
+ def get(self, key):
+ return self._dataStore.get(self._keyPrefix+key)
+
+ def delete(self, key):
+ self._dataStore.delete(self._keyPrefix+key)
+
+ def set(self, key, val, time=0):
+ self._dataStore.set(self._keyPrefix+key, val, time=time)
+
class CacheRegion(object):
    '''
    A `CacheRegion` stores some `CacheItem` instances.

    This implementation stores the data in the memory of the current process.
    If you need a more advanced data store, create a cacheStore class that works
    with Cheetah's CacheStore protocol and provide it as the cacheStore argument
    to __init__. For example you could use
    Cheetah.CacheStore.MemcachedCacheStore, a wrapper around the Python
    memcached API (http://www.danga.com/memcached).
    '''
    # Subclasses may override this to customize the item class used.
    _cacheItemClass = CacheItem

    def __init__(self, regionID, templateCacheIdPrefix='', cacheStore=None):
        # A region is "new" until the first item is requested from it.
        self._isNew = True
        self._regionID = regionID
        self._templateCacheIdPrefix = templateCacheIdPrefix
        if not cacheStore:
            cacheStore = Cheetah.CacheStore.MemoryCacheStore()
        self._cacheStore = cacheStore
        # All keys are namespaced '<templatePrefix>:<regionID>:<itemID>' so
        # multiple regions can share one backend store.
        self._wrappedCacheDataStore = _CacheDataStoreWrapper(
            cacheStore, keyPrefix=templateCacheIdPrefix+':'+regionID+':')
        self._cacheItems = {}

    def isNew(self):
        return self._isNew

    def clear(self):
        " drop all the caches stored in this cache region "
        # NOTE(review): deleting while iterating .keys() is safe on
        # Python 2 (keys() returns a list); it would break on Python 3.
        for cacheItemId in self._cacheItems.keys():
            cacheItem = self._cacheItems[cacheItemId]
            cacheItem.clear()
            del self._cacheItems[cacheItemId]

    def getCacheItem(self, cacheItemID):
        """ Lazy access to a cacheItem

        Try to find a cache in the stored caches. If it doesn't
        exist, it's created.

        Returns a `CacheItem` instance.
        """
        # IDs are hashed so arbitrary varyBy values become safe,
        # fixed-length store keys.
        cacheItemID = md5(str(cacheItemID)).hexdigest()

        if cacheItemID not in self._cacheItems:
            cacheItem = self._cacheItemClass(
                cacheItemID=cacheItemID, cacheStore=self._wrappedCacheDataStore)
            self._cacheItems[cacheItemID] = cacheItem
            self._isNew = False
        return self._cacheItems[cacheItemID]
View
106 cheetah/CacheStore.py
@@ -0,0 +1,106 @@
+'''
+Provides several CacheStore backends for Cheetah's caching framework. The
+methods provided by these classes have the same semantics as those in the
+python-memcached API, except for their return values:
+
+set(key, val, time=0)
+ set the value unconditionally
+add(key, val, time=0)
+ set only if the server doesn't already have this key
+replace(key, val, time=0)
+    set only if the server already has this key
+ set only if the server already have this key
+get(key)
+    returns the stored value or raises a KeyError
+delete(key)
+ deletes or raises a KeyError
+'''
+import time
+
class Error(Exception):
    """Base error type for cache store operations."""
    pass

class AbstractCacheStore(object):
    """Interface every cache store backend must implement.

    Method semantics follow the python-memcached API (see the module
    docstring above).
    """

    def set(self, key, val, time=None):
        raise NotImplementedError

    def add(self, key, val, time=None):
        raise NotImplementedError

    def replace(self, key, val, time=None):
        raise NotImplementedError

    def delete(self, key):
        raise NotImplementedError

    def get(self, key):
        raise NotImplementedError

class MemoryCacheStore(AbstractCacheStore):
    """In-process, dict-backed cache store.

    Entries are stored as (val, expiryTime) tuples; an expiry time of 0
    means the entry never expires.
    """
    def __init__(self):
        self._data = {}

    def set(self, key, val, time=0):
        # Unconditional set.
        self._data[key] = (val, time)

    def add(self, key, val, time=0):
        # Set only if the key is NOT already present.
        if key in self._data:
            raise Error('a value for key %r is already in the cache'%key)
        self._data[key] = (val, time)

    def replace(self, key, val, time=0):
        # Set only if the key IS already present (memcached 'replace'
        # semantics).  BUGFIX: the original condition was inverted --
        # replace() raised for existing keys and silently inserted
        # missing ones.
        if key not in self._data:
            raise Error('no value for key %r is in the cache'%key)
        self._data[key] = (val, time)

    def delete(self, key):
        # Raises KeyError if the key is absent.
        del self._data[key]

    def get(self, key):
        # Expired entries are purged lazily, on access.
        (val, exptime) = self._data[key]
        if exptime and time.time() > exptime:
            del self._data[key]
            raise KeyError(key)
        else:
            return val

    def clear(self):
        self._data.clear()
+
class MemcachedCacheStore(AbstractCacheStore):
    """Cache store backed by the python-memcached client.

    Requires the third-party 'memcache' module; it is imported lazily in
    __init__ so the rest of this module works without it.
    """
    # Default server list.  BUGFIX: this must be a tuple -- the original
    # was missing the trailing comma, making it a plain string, which
    # memcache.Client would iterate character by character.
    servers = ('127.0.0.1:11211',)

    def __init__(self, servers=None, debug=False):
        if servers is None:
            servers = self.servers
        from memcache import Client as MemcachedClient
        self._client = MemcachedClient(servers, debug)

    def set(self, key, val, time=0):
        self._client.set(key, val, time)

    def add(self, key, val, time=0):
        # memcached returns a falsy result when the key already exists.
        # BUGFIX: removed a stray 'self._data[key] = ...' line copied
        # from MemoryCacheStore -- this class has no _data attribute, so
        # every successful add() raised AttributeError.
        res = self._client.add(key, val, time)
        if not res:
            raise Error('a value for key %r is already in the cache'%key)

    def replace(self, key, val, time=0):
        # Falsy result means the key was NOT present.  (Same stray
        # '_data' assignment removed here as in add().)
        res = self._client.replace(key, val, time)
        if not res:
            raise Error('no value for key %r is in the cache'%key)

    def delete(self, key):
        res = self._client.delete(key, time=0)
        if not res:
            raise KeyError(key)

    def get(self, key):
        val = self._client.get(key)
        if val is None:
            raise KeyError(key)
        else:
            return val

    def clear(self):
        self._client.flush_all()
View
98 cheetah/DirectiveAnalyzer.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+
+import os
+import pprint
+
+try:
+ from functools import reduce
+except ImportError:
+ # Assume we have reduce
+ pass
+
+from Cheetah import Parser
+from Cheetah import Compiler
+from Cheetah import Template
+
class Analyzer(Parser.Parser):
    """Parser subclass that tallies how often each directive appears.

    Counts accumulate in ``self.calls`` as {directiveName: count}.
    """
    def __init__(self, *args, **kwargs):
        self.calls = {}
        super(Analyzer, self).__init__(*args, **kwargs)

    def eatDirective(self):
        # Count the directive about to be consumed, then defer to the
        # normal parsing behavior.
        directive = self.matchDirective()
        self.calls[directive] = self.calls.get(directive, 0) + 1
        super(Analyzer, self).eatDirective()
+
class AnalysisCompiler(Compiler.ModuleCompiler):
    # A ModuleCompiler that swaps in the counting Analyzer parser, so a
    # normal compile pass doubles as a directive census.
    parserClass = Analyzer
+
+
def analyze(source):
    """Compile *source* with the counting parser and return the
    {directiveName: count} tally gathered during parsing."""
    klass = Template.Template.compile(source, compilerClass=AnalysisCompiler)
    return klass._CHEETAH_compilerInstance._parser.calls
+
def main_file(f):
    """Analyze a single template file *f*; returns its directive tally."""
    fd = open(f, 'r')
    try:
        print u'>>> Analyzing %s' % f
        calls = analyze(fd.read())
        return calls
    finally:
        # Always release the file handle, even if analysis blows up.
        fd.close()
+
+
+def _find_templates(directory, suffix):
+ for root, dirs, files in os.walk(directory):
+ for f in files:
+ if not f.endswith(suffix):
+ continue
+ yield root + os.path.sep + f
+
def _analyze_templates(iterable):
    """Lazily run main_file() over every template path in *iterable*."""
    for templatePath in iterable:
        yield main_file(templatePath)
+
def main_dir(opts):
    """Analyze every matching template under opts.dir and merge the
    per-file tallies into a single {directiveName: count} dict."""
    results = _analyze_templates(_find_templates(opts.dir, opts.suffix))
    totals = {}
    for series in results:
        # A template with no directives yields a falsy tally; skip it.
        if not series:
            continue
        # Python 2 dict iteration; would be .items() on Python 3.
        for k, v in series.iteritems():
            try:
                totals[k] += v
            except KeyError:
                totals[k] = v
    return totals
+
+
def main():
    """Command-line entry point: analyze one file (-f) or a directory
    tree of templates (-d) and pretty-print the directive counts."""
    from optparse import OptionParser
    op = OptionParser()
    op.add_option('-f', '--file', dest='file', default=None,
            help='Specify a single file to analyze')
    op.add_option('-d', '--dir', dest='dir', default=None,
            help='Specify a directory of templates to analyze')
    op.add_option('--suffix', default='tmpl', dest='suffix',
            help='Specify a custom template file suffix for the -d option (default: "tmpl")')
    opts, args = op.parse_args()

    # Nothing to do without a target; show usage instead.
    if not opts.file and not opts.dir:
        op.print_help()
        return

    results = None
    if opts.file:
        results = main_file(opts.file)
    if opts.dir:
        # NOTE(review): -d overwrites the -f result if both are given.
        results = main_dir(opts)

    pprint.pprint(results)


if __name__ == '__main__':
    main()
+
View
16 cheetah/Django.py
@@ -0,0 +1,16 @@
+import Cheetah.Template
+
def render(template_file, **kwargs):
    '''
    Cheetah.Django.render() takes the template filename
    (the filename should be a file in your Django
    TEMPLATE_DIRS)

    Any additional keyword arguments passed in are propagated into the
    template's searchList.

    Returns a django.http.HttpResponse containing the rendered output.
    '''
    # Imported lazily so this module can be loaded without Django.
    import django.http
    import django.template.loader
    source, loader = django.template.loader.find_template_source(template_file)
    t = Cheetah.Template.Template(source, searchList=[kwargs])
    return django.http.HttpResponse(t.__str__())
View
108 cheetah/DummyTransaction.py
@@ -0,0 +1,108 @@
+
+'''
+Provides dummy Transaction and Response classes is used by Cheetah in place
+of real Webware transactions when the Template obj is not used directly as a
+Webware servlet.
+
+Warning: This may be deprecated in the future, please do not rely on any
+specific DummyTransaction or DummyResponse behavior
+'''
+
+import logging
+import types
+
class DummyResponseFailure(Exception):
    # Raised for failures specific to the dummy response machinery.
    pass
+
class DummyResponse(object):
    '''
    A dummy Response class is used by Cheetah in place of real Webware
    Response objects when the Template obj is not used directly as a Webware
    servlet
    '''
    def __init__(self):
        # Output is buffered as a list of chunks and joined in getvalue().
        self._outputChunks = []

    def flush(self):
        # Nothing to flush; output is only assembled on demand.
        pass

    def safeConvert(self, chunk):
        # Exceptionally gross, but the safest way
        # I've found to ensure I get a legit unicode object
        if not chunk:
            return u''
        if isinstance(chunk, unicode):
            return chunk
        try:
            return chunk.decode('utf-8', 'strict')
        except UnicodeDecodeError:
            try:
                return chunk.decode('latin-1', 'strict')
            except UnicodeDecodeError:
                return chunk.decode('ascii', 'ignore')
        except AttributeError:
            # Not a byte string at all; coerce what we can.
            return unicode(chunk, errors='ignore')
        # (BUGFIX: removed an unreachable 'return chunk' that followed
        # the try/except -- every path above already returns.)

    def write(self, value):
        self._outputChunks.append(value)

    def writeln(self, txt):
        # BUGFIX: the original called the bare name write(), which raised
        # NameError at runtime; the method must be called on self.
        self.write(txt)
        self.write('\n')

    def getvalue(self, outputChunks=None):
        """Join the buffered chunks into a single unicode string,
        falling back to per-chunk safeConvert() if the plain join hits
        undecodable data."""
        chunks = outputChunks or self._outputChunks
        try:
            return u''.join(chunks)
        except UnicodeDecodeError as ex:
            logging.debug('Trying to work around a UnicodeDecodeError in getvalue()')
            # NOTE(review): the '%s' below is never filled in -- the
            # message is logged verbatim.
            logging.debug('...perhaps you could fix "%s" while you\'re debugging')
            return ''.join((self.safeConvert(c) for c in chunks))

    def writelines(self, *lines):
        ## not used
        [self.writeln(ln) for ln in lines]
+
+
class DummyTransaction(object):
    '''
    Stand-in for a Webware transaction when a Template is rendered
    outside of Webware.

    Only the response() accessor is meaningful here; every other
    transaction concept has no equivalent in this context.
    '''
    def __init__(self, *args, **kwargs):
        self._response = None

    def response(self, resp=None):
        # Lazily create (or adopt) a response on first access, then keep
        # handing back that same object.
        existing = self._response
        if existing is not None:
            return existing
        self._response = resp or DummyResponse()
        return self._response
+
+
class TransformerResponse(DummyResponse):
    """DummyResponse that optionally pipes the final output through a
    filter (assigned to self._filter) before returning it."""
    def __init__(self, *args, **kwargs):
        super(TransformerResponse, self).__init__(*args, **kwargs)
        self._filter = None

    def getvalue(self, **kwargs):
        output = super(TransformerResponse, self).getvalue(**kwargs)
        transform = self._filter
        if not transform:
            return output
        # A filter class (rather than an instance) is instantiated first.
        if isinstance(transform, type):
            transform = transform()
        return transform.filter(output)
+
+
class TransformerTransaction(object):
    """Minimal transaction whose response applies output transforms."""
    def __init__(self, *args, **kwargs):
        self._response = None

    def response(self):
        # Unlike DummyTransaction, a fresh TransformerResponse is built
        # on every call unless one was already assigned.
        return self._response or TransformerResponse()
+
View
62 cheetah/ErrorCatchers.py
@@ -0,0 +1,62 @@
+# $Id: ErrorCatchers.py,v 1.7 2005/01/03 19:59:07 tavis_rudd Exp $
+"""ErrorCatcher class for Cheetah Templates
+
+Meta-Data
+================================================================================
+Author: Tavis Rudd <tavis@damnsimple.com>
+Version: $Revision: 1.7 $
+Start Date: 2001/08/01
+Last Revision Date: $Date: 2005/01/03 19:59:07 $
+"""
+__author__ = "Tavis Rudd <tavis@damnsimple.com>"
+__revision__ = "$Revision: 1.7 $"[11:-2]
+
+import time
+from Cheetah.NameMapper import NotFound
+
class Error(Exception):
    # Base exception for the error-catcher framework.
    pass
+
class ErrorCatcher:
    # Exception types this catcher intercepts during placeholder lookup.
    _exceptionsToCatch = (NotFound,)

    def __init__(self, templateObj):
        # Stateless by default; subclasses may keep per-template state.
        pass

    def exceptions(self):
        # The tuple of exception classes the template engine should catch.
        return self._exceptionsToCatch

    def warn(self, exc_val, code, rawCode, lineCol):
        # Default policy: echo the raw placeholder source back into the
        # output instead of failing the render.
        return rawCode
## make an alias
Echo = ErrorCatcher
+
class BigEcho(ErrorCatcher):
    """Echo the failing placeholder, loudly framed so it stands out."""
    def warn(self, exc_val, code, rawCode, lineCol):
        bar = "=" * 15
        return "%s&lt;%s could not be found&gt;%s" % (bar, rawCode, bar)
+
class KeyError(ErrorCatcher):
    # NOTE(review): this class deliberately shadows the builtin KeyError
    # within this module, so warn() below raises *this* class (whose
    # inherited __init__ accepts the single message argument).
    def warn(self, exc_val, code, rawCode, lineCol):
        # Escalate missing placeholders to a hard error.
        raise KeyError("no '%s' in this Template Object's Search List" % rawCode)
+
class ListErrors(ErrorCatcher):
    """Accumulate a list of errors."""
    _timeFormat = "%c"

    def __init__(self, templateObj):
        ErrorCatcher.__init__(self, templateObj)
        self._errors = []

    def warn(self, exc_val, code, rawCode, lineCol):
        # Capture the warn() arguments by name via locals(); the stored
        # dict's keys are the parameter names themselves, so renaming a
        # parameter would change the recorded schema.
        dict = locals().copy()  # NOTE: shadows the builtin 'dict' locally
        del dict['self']
        dict['time'] = time.strftime(self._timeFormat,
                                     time.localtime(time.time()))
        self._errors.append(dict)
        return rawCode

    def listErrors(self):
        """Return the list of errors."""
        return self._errors
+
+
View
357 cheetah/FileUtils.py
@@ -0,0 +1,357 @@
+
+from glob import glob
+import os
+from os import listdir
+import os.path
+import re
+from tempfile import mktemp
+
+def _escapeRegexChars(txt,
+ escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')):
+ return escapeRE.sub(r'\\\1', txt)
+
def findFiles(*args, **kw):
    """Recursively find all the files matching a glob pattern.

    This function is a wrapper around the FileFinder class. See its docstring
    for details about the accepted arguments, etc."""

    # All arguments are forwarded untouched to FileFinder.__init__.
    return FileFinder(*args, **kw).files()
+
def replaceStrInFiles(files, theStr, repl):

    """Replace all instances of 'theStr' with 'repl' for each file in the 'files'
    list. Returns a dictionary with data about the matches found.

    This is like string.replace() on a multi-file basis.

    This function is a wrapper around the FindAndReplace class. See its
    docstring for more details."""

    # Escape the literal string so FindAndReplace treats it verbatim
    # rather than as a regex.
    pattern = _escapeRegexChars(theStr)
    return FindAndReplace(files, pattern, repl).results()
+
def replaceRegexInFiles(files, pattern, repl):

    """Replace all instances of regex 'pattern' with 'repl' for each file in the
    'files' list. Returns a dictionary with data about the matches found.

    This is like re.sub on a multi-file basis.

    This function is a wrapper around the FindAndReplace class. See its
    docstring for more details."""

    # 'pattern' may be a raw pattern string or a pre-compiled regex.
    return FindAndReplace(files, pattern, repl).results()
+
+
+##################################################
+## CLASSES
+
class FileFinder:

    """Collects every file beneath a root directory that matches one of
    the given glob patterns, skipping ignored directory names/paths.

    The matches are gathered eagerly at construction time; call files()
    to retrieve them.
    """

    def __init__(self, rootPath,
                 globPatterns=('*',),
                 ignoreBasenames=('CVS', '.svn'),
                 ignoreDirs=(),
                 ):
        self._rootPath = rootPath
        self._globPatterns = globPatterns
        self._ignoreBasenames = ignoreBasenames
        self._ignoreDirs = ignoreDirs
        self._files = []
        self.walkDirTree(rootPath)

    def walkDirTree(self, dir='.',
                    listdir=os.listdir,
                    isdir=os.path.isdir,
                    join=os.path.join,
                    ):
        """Walk the tree rooted at *dir*, collecting matching files."""
        # Explicit LIFO stack instead of recursion.
        pending = [dir]
        while pending:
            current = pending.pop()
            # Collect this directory's matches ...
            self.processDir(current)
            # ... then queue its (non-ignored) subdirectories.
            for baseName in listdir(current):
                fullPath = join(current, baseName)
                if isdir(fullPath) and self.filterDir(baseName, fullPath):
                    pending.append(fullPath)

    def filterDir(self, baseName, fullPath):
        """Hook for filtering out dirs: return False to skip one."""
        if baseName in self._ignoreBasenames:
            return False
        return fullPath not in self._ignoreDirs

    def processDir(self, dir, glob=glob):
        # Gather matches for every pattern in this single directory.
        for pattern in self._globPatterns:
            self._files.extend(glob(os.path.join(dir, pattern)))

    def files(self):
        return self._files
+
+class _GenSubberFunc:
+
+ """Converts a 'sub' string in the form that one feeds to re.sub (backrefs,
+ groups, etc.) into a function that can be used to do the substitutions in
+ the FindAndReplace class."""
+
+ backrefRE = re.compile(r'\\([1-9][0-9]*)')
+ groupRE = re.compile(r'\\g<([a-zA-Z_][a-zA-Z_]*)>')
+
+ def __init__(self, replaceStr):
+ self._src = replaceStr
+ self._pos = 0
+ self._codeChunks = []
+ self.parse()
+
+ def src(self):
+ return self._src
+
+ def pos(self):
+ return self._pos
+
+ def setPos(self, pos):
+ self._pos = pos
+
+ def atEnd(self):
+ return self._pos >= len(self._src)
+
+ def advance(self, offset=1):
+ self._pos += offset
+
+ def readTo(self, to, start=None):
+ if start == None:
+ start = self._pos
+ self._pos = to
+ if self.atEnd():
+ return self._src[start:]
+ else:
+ return self._src[start:to]
+
+ ## match and get methods
+
+ def matchBackref(self):
+ return self.backrefRE.match(self.src(), self.pos())
+
+ def getBackref(self):
+ m = self.matchBackref()
+ self.setPos(m.end())
+ return m.group(1)
+
+ def matchGroup(self):
+ return self.groupRE.match(self.src(), self.pos())
+
+ def getGroup(self):
+ m = self.matchGroup()
+ self.setPos(m.end())
+ return m.group(1)
+
+ ## main parse loop and the eat methods
+
+ def parse(self):
+ while not self.atEnd():
+ if self.matchBackref():
+ self.eatBackref()
+ elif self.matchGroup():
+ self.eatGroup()
+ else:
+ self.eatStrConst()
+
+ def eatStrConst(self):
+ startPos = self.pos()
+ while not self.atEnd():
+ if self.matchBackref() or self.matchGroup():
+ break
+ else:
+ self.advance()
+ strConst = self.readTo(self.pos(), start=startPos)
+ self.addChunk(repr(strConst))
+
+ def eatBackref(self):
+ self.addChunk( 'm.group(' + self.getBackref() + ')' )
+
+ def eatGroup(self):
+ self.addChunk( 'm.group("' + self.getGroup() + '")' )
+
+ def addChunk(self, chunk):
+ self._codeChunks.append(chunk)
+
+ ## code wrapping methods
+
+ def codeBody(self):
+ return ', '.join(self._codeChunks)
+
+ def code(self):
+ return "def subber(m):\n\treturn ''.join([%s])\n" % (self.codeBody())
+
+ def subberFunc(self):
+ exec(self.code())
+ return subber
+
+
class FindAndReplace:

    """Find and replace all instances of 'patternOrRE' with 'replacement' for
    each file in the 'files' list. This is a multi-file version of re.sub().

    'patternOrRE' can be a raw regex pattern or
    a regex object as generated by the re module. 'replacement' can be any
    string that would work with patternOrRE.sub(replacement, fileContents).
    """

    def __init__(self, files, patternOrRE, replacement,
                 recordResults=True):

        # Accept either a pattern string or a pre-compiled regex.
        # (Python 2: basestring covers both str and unicode.)
        if isinstance(patternOrRE, basestring):
            self._regex = re.compile(patternOrRE)
        else:
            self._regex = patternOrRE
        # The replacement may likewise be a template string (converted to
        # a substitution function) or an already-callable repl.
        if isinstance(replacement, basestring):
            self._subber = _GenSubberFunc(replacement).subberFunc()
        else:
            self._subber = replacement

        self._pattern = pattern = self._regex.pattern
        self._files = files
        self._results = {}
        self._recordResults = recordResults

        ## see if we should use pgrep to do the file matching
        # If the external 'pgrep' tool exists and understands the pattern,
        # it lets _run() skip reading files that contain no match at all.
        self._usePgrep = False
        if (os.popen3('pgrep')[2].read()).startswith('Usage:'):
            ## now check to make sure pgrep understands the pattern
            tmpFile = mktemp()
            open(tmpFile, 'w').write('#')
            if not (os.popen3('pgrep "' + pattern + '" ' + tmpFile)[2].read()):
                # it didn't print an error msg so we're ok
                self._usePgrep = True
            os.remove(tmpFile)

        self._run()

    def results(self):
        """Per-file match data: {fileName: {'count': int, 'matches': [...]}}."""
        return self._results

    def _run(self):
        regex = self._regex
        subber = self._subDispatcher
        usePgrep = self._usePgrep
        pattern = self._pattern
        for file in self._files:
            if not os.path.isfile(file):
                continue # skip dirs etc.

            self._currFile = file
            found = False
            # 'orig' caches the file contents from a previous iteration;
            # the locals() checks below avoid reading a file twice.
            if 'orig' in locals():
                del orig
            if self._usePgrep:
                if os.popen('pgrep "' + pattern + '" ' + file ).read():
                    found = True
            else:
                orig = open(file).read()
                if regex.search(orig):
                    found = True
            if found:
                if 'orig' not in locals():
                    orig = open(file).read()
                new = regex.sub(subber, orig)
                open(file, 'w').write(new)

    def _subDispatcher(self, match):
        # Wraps the real substitution function so that every match is
        # (optionally) recorded in self._results for the current file.
        if self._recordResults:
            if self._currFile not in self._results:
                res = self._results[self._currFile] = {}
                res['count'] = 0
                res['matches'] = []
            else:
                res = self._results[self._currFile]
            res['count'] += 1
            res['matches'].append({'contents': match.group(),
                                   'start': match.start(),
                                   'end': match.end(),
                                   }
                                  )
        return self._subber(match)
+
+
class SourceFileStats:

    """Computes line-count statistics (code / blank / comment / total)
    for a set of source files."""

    _fileStats = None

    def __init__(self, files):
        self._fileStats = stats = {}
        for file in files:
            stats[file] = self.getFileStats(file)

    def rawStats(self):
        """Per-file stats: {fileName: statsDict}."""
        return self._fileStats

    def summary(self):
        """Aggregate the per-file stats into one totals dict."""
        codeLines = 0
        blankLines = 0
        commentLines = 0
        totalLines = 0
        for fileStats in self.rawStats().values():
            codeLines += fileStats['codeLines']
            blankLines += fileStats['blankLines']
            commentLines += fileStats['commentLines']
            totalLines += fileStats['totalLines']

        stats = {'codeLines': codeLines,
                 'blankLines': blankLines,
                 'commentLines': commentLines,
                 'totalLines': totalLines,
                 }
        return stats

    def printStats(self):
        pass

    def getFileStats(self, fileName):
        """Count code, blank and comment lines in one file.

        A comment line is one whose first non-whitespace character is
        '#'; a blank line is empty or all-whitespace; everything else is
        code.
        """
        codeLines = 0
        blankLines = 0
        commentLines = 0
        # BUGFIX: was r'\s#.*$' / '\s$' -- each demanded exactly one
        # leading whitespace char, so '#...' at column 0 and empty lines
        # were miscounted as code.
        commentLineRe = re.compile(r'\s*#.*$')
        blankLineRe = re.compile(r'\s*$')
        lines = open(fileName).read().splitlines()
        totalLines = len(lines)

        for line in lines:
            if commentLineRe.match(line):
                commentLines += 1
            elif blankLineRe.match(line):
                blankLines += 1
            else:
                codeLines += 1

        stats = {'codeLines': codeLines,
                 'blankLines': blankLines,
                 'commentLines': commentLines,
                 'totalLines': totalLines,
                 }

        return stats
View
212 cheetah/Filters.py
@@ -0,0 +1,212 @@
+'''
+ Filters for the #filter directive as well as #transform
+
+ #filter results in output filters Cheetah's $placeholders .
+ #transform results in a filter on the entirety of the output
+'''
+import sys
+
+# Additional entities WebSafe knows how to transform. No need to include
+# '<', '>' or '&' since those will have been done already.
+webSafeEntities = {' ': '&nbsp;', '"': '&quot;'}
+
class Filter(object):
    """A baseclass for the Cheetah Filters."""

    def __init__(self, template=None):
        """Setup a reference to the template that is using the filter instance.
        This reference isn't used by any of the standard filters, but is
        available to Filter subclasses, should they need it.

        Subclasses should call this method.
        """
        self.template = template

    def filter(self, val, encoding=None, str=str, **kw):
        '''
        Pass Unicode strings through unmolested, unless an encoding is specified.

        None becomes u''; unicode objects are returned as-is; anything
        else is coerced with unicode(), falling back to str() for byte
        strings that aren't decodable.
        '''
        if val is None:
            return u''
        if isinstance(val, unicode):
            # ignore the encoding and return the unicode object
            return val
        else:
            try:
                return unicode(val)
            except UnicodeDecodeError:
                # we could put more fallbacks here, but we'll just pass the str
                # on and let DummyTransaction worry about it
                return str(val)
+
# Backwards-compatible names: both historical filter names are plain
# aliases for the base Filter class.
RawOrEncodedUnicode = Filter

EncodeUnicode = Filter
+
class Markdown(EncodeUnicode):
    '''
    Markdown will change regular strings to Markdown
    (http://daringfireball.net/projects/markdown/)

    Such that:
        My Header
        =========
    Becomes:
        <h1>My Header</h1>

    and so on.

    Markdown is meant to be used with the #transform
    tag, as its usefulness with #filter is marginal at
    best
    '''
    def filter(self, value, **kwargs):
        # This is a bit of a hack to allow outright embedding of the markdown module
        try:
            import markdown
        except ImportError:
            print('>>> Exception raised importing the "markdown" module')
            print('>>> Are you sure you have the ElementTree module installed?')
            print('          http://effbot.org/downloads/#elementtree')
            raise

        # Unicode-normalize via the base class, then convert to HTML.
        encoded = super(Markdown, self).filter(value, **kwargs)
        return markdown.markdown(encoded)
+
class CodeHighlighter(EncodeUnicode):
    '''
    The CodeHighlighter filter depends on the "pygments" module which you can
    download and install from: http://pygments.org

    What the CodeHighlighter assumes the string that it's receiving is source
    code and uses pygments.lexers.guess_lexer() to try to guess which parser
    to use when highlighting it.

    CodeHighlighter will return the HTML and CSS to render the code block, syntax
    highlighted, in a browser

    NOTE: I had an issue installing pygments on Linux/amd64/Python 2.6 dealing with
    importing of pygments.lexers, I was able to correct the failure by adding:
        raise ImportError
    to line 39 of pygments/plugin.py (since importing pkg_resources was causing issues)
    '''
    def filter(self, source, **kwargs):
        encoded = super(CodeHighlighter, self).filter(source, **kwargs)
        # Degrade gracefully when pygments isn't installed: return the
        # unhighlighted (but unicode-normalized) source.
        try:
            from pygments import highlight
            from pygments import lexers
            from pygments import formatters
        except ImportError, ex:
            print('<%s> - Failed to import pygments! (%s)' % (self.__class__.__name__, ex))
            print('-- You may need to install it from: http://pygments.org')
            return encoded

        # Guess the language; fall back to Python when unrecognized.
        lexer = None
        try:
            lexer = lexers.guess_lexer(source)
        except lexers.ClassNotFound:
            lexer = lexers.PythonLexer()

        formatter = formatters.HtmlFormatter(cssclass='code_highlighter')
        encoded = highlight(encoded, lexer, formatter)
        css = formatter.get_style_defs('.code_highlighter')
        return '''<style type="text/css"><!--
            %(css)s
        --></style>%(source)s''' % {'css' : css, 'source' : encoded}
+
+
+
class MaxLen(Filter):
    """Truncating filter: output is cut off at kw['maxlen'] characters."""
    def filter(self, val, **kw):
        """Replace None with '' and cut off at maxlen."""
        output = super(MaxLen, self).filter(val, **kw)
        maxlen = kw.get('maxlen')
        if maxlen is not None and len(output) > maxlen:
            return output[:maxlen]
        return output
+
class WebSafe(Filter):
    """Escape HTML entities in $placeholders.
    """
    def filter(self, val, **kw):
        s = super(WebSafe, self).filter(val, **kw)
        # Core escapes, ampersand first so it doesn't clobber the
        # entities produced by the later replacements.
        s = s.replace("&", "&amp;")
        s = s.replace("<", "&lt;")
        s = s.replace(">", "&gt;")
        # Optional extra characters via the 'also' keyword argument.
        for extra in kw.get('also', ()):
            replacement = webSafeEntities.get(extra)
            if replacement is None:
                # No named entity known: use a numeric character reference.
                replacement = "&#%s;" % ord(extra)
            s = s.replace(extra, replacement)
        return s
+
+
class Strip(Filter):
    """Strip leading/trailing whitespace but preserve newlines.

    The value is processed line by line, so every input line maps to
    exactly one output line with its trailing newline intact.  We scan
    for each newline individually rather than using split('\n'), which
    would squeeze out consecutive blank lines.  Intended for use both
    with #filter and the proposed #sed directive.
    """
    def filter(self, val, **kw):
        text = super(Strip, self).filter(val, **kw)
        pieces = []
        cursor = 0  # current line is text[cursor:newlineAt]
        while True:
            newlineAt = text.find('\n', cursor)
            if newlineAt == -1:
                # No more newlines.
                break
            pieces.append(text[cursor:newlineAt].strip())
            pieces.append('\n')
            cursor = newlineAt + 1
        # Whatever follows the final newline (possibly the empty string).
        pieces.append(text[cursor:].strip())
        return "".join(pieces)
+
class StripSqueeze(Filter):
    """Canonicalizes every chunk of whitespace to a single space.

    Strips leading/trailing whitespace. Removes all newlines, so
    multi-line input is joined into one long line with NO trailing
    newline.
    """
    def filter(self, val, **kw):
        text = super(StripSqueeze, self).filter(val, **kw)
        # split() with no args collapses any whitespace run and drops
        # leading/trailing whitespace in a single step.
        return " ".join(text.split())
+
+##################################################
+## MAIN ROUTINE -- testing
+
def test():
    """Smoke-test the filters by printing before/after samples."""
    s1 = "abc <=> &"
    s2 = " asdf \n\t 1 2 3\n"
    print("WebSafe INPUT:", repr(s1))
    print("      WebSafe:", repr(WebSafe().filter(s1)))

    print()
    print(" Strip INPUT:", repr(s2))
    print("       Strip:", repr(Strip().filter(s2)))
    print("StripSqueeze:", repr(StripSqueeze().filter(s2)))

    print("Unicode:", repr(EncodeUnicode().filter(u'aoeu12345\u1234')))

if __name__ == "__main__":
    test()
+
+# vim: shiftwidth=4 tabstop=4 expandtab
View
129 cheetah/ImportHooks.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+
+"""
+Provides some import hooks to allow Cheetah's .tmpl files to be imported
+directly like Python .py modules.
+
+To use these:
+ import Cheetah.ImportHooks
+ Cheetah.ImportHooks.install()
+"""
+
+import sys
+import os.path
+import types
+import __builtin__
+import imp
+from threading import RLock
+import string
+import traceback
+import types
+
+from Cheetah import ImportManager
+from Cheetah.ImportManager import DirOwner
+from Cheetah.Compiler import Compiler
+from Cheetah.convertTmplPathToModuleName import convertTmplPathToModuleName
+
+_installed = False
+
+##################################################
+## HELPER FUNCS
+
# Optional directory where compiled template .py files are written.
# NOTE(review): only _cacheDir[0] is ever consulted (see
# CheetahDirOwner._compile), so only the FIRST setCacheDir() call has any
# effect -- confirm whether later calls are meant to win.
_cacheDir = []
def setCacheDir(cacheDir):
    # Register a directory for caching compiled template modules.
    global _cacheDir
    _cacheDir.append(cacheDir)
+
+##################################################
+## CLASSES
+
class CheetahDirOwner(DirOwner):
    # A DirOwner that, in addition to normal .py/.pyc/extension lookup, can
    # satisfy imports from Cheetah *.tmpl template files by compiling them
    # to Python modules on the fly.  All lookups are serialized through a
    # single class-level re-entrant lock.
    _lock = RLock()
    _acquireLock = _lock.acquire
    _releaseLock = _lock.release

    templateFileExtensions = ('.tmpl',)

    def getmod(self, name):
        # Return a module object for `name`, or None.  Normal Python files
        # take precedence; templates are only tried if DirOwner finds
        # nothing.  Compilation failures surface as ImportError with the
        # original traceback embedded in the message.
        self._acquireLock()
        try:
            mod = DirOwner.getmod(self, name)
            if mod:
                return mod

            for ext in self.templateFileExtensions:
                tmplPath = os.path.join(self.path, name + ext)
                if os.path.exists(tmplPath):
                    try:
                        return self._compile(name, tmplPath)
                    except:
                        # @@TR: log the error
                        exc_txt = traceback.format_exc()
                        exc_txt =' '+(' \n'.join(exc_txt.splitlines()))
                        raise ImportError(
                            'Error while compiling Cheetah module'
                            ' %(name)s, original traceback follows:\n%(exc_txt)s'%locals())
                    ##
            return None

        finally:
            self._releaseLock()

    def _compile(self, name, tmplPath):
        ## @@ consider adding an ImportError raiser here
        # Compile the template source to Python module source, optionally
        # writing it into the cache dir, then exec-compile it into a new
        # module object whose code is stashed on mod.__co__ (executed later
        # by the ImportManager).
        code = str(Compiler(file=tmplPath, moduleName=name,
                            mainClassName=name))
        if _cacheDir:
            __file__ = os.path.join(_cacheDir[0],
                                    convertTmplPathToModuleName(tmplPath)) + '.py'
            try:
                open(__file__, 'w').write(code)
            except OSError:
                ## @@ TR: need to add some error code here
                # NOTE(review): on Python 2 a failed write raises IOError,
                # which is not an OSError subclass there -- confirm this
                # clause catches what it intends.
                traceback.print_exc(file=sys.stderr)
                __file__ = tmplPath
        else:
            __file__ = tmplPath
        co = compile(code+'\n', __file__, 'exec')

        mod = types.ModuleType(name)
        mod.__file__ = co.co_filename
        if _cacheDir:
            mod.__orig_file__ = tmplPath # @@TR: this is used in the WebKit
                                         # filemonitoring code
        mod.__co__ = co
        return mod
+
+
+##################################################
+## FUNCTIONS
+
def install(templateFileExtensions=('.tmpl',)):
    """Install the Cheetah Import Hooks.

    After this call, ``import somename`` will also find and compile
    ``somename<ext>`` Cheetah template files via CheetahDirOwner.

    templateFileExtensions -- tuple of file extensions (with the leading
        dot) that should be treated as Cheetah templates.
    """
    global _installed
    if not _installed:
        CheetahDirOwner.templateFileExtensions = templateFileExtensions
        import __builtin__
        # Only hook in if the builtin __import__ has not been replaced yet.
        if isinstance(__builtin__.__import__, types.BuiltinFunctionType):
            global __oldimport__
            __oldimport__ = __builtin__.__import__
            ImportManager._globalOwnerTypes.insert(0, CheetahDirOwner)
            #ImportManager._globalOwnerTypes.append(CheetahDirOwner)
            global _manager
            _manager = ImportManager.ImportManager()
            _manager.setThreaded()
            _manager.install()
            # BUGFIX: record that the hooks are active.  The original never
            # set this flag, so repeated install() calls re-installed the
            # hooks and uninstall()'s guard could never reflect reality.
            _installed = True

def uninstall():
    """Uninstall the Cheetah Import Hooks."""
    global _installed
    # BUGFIX: was 'if not _installed', which tested the flag backwards.
    if _installed:
        import __builtin__
        if isinstance(__builtin__.__import__, types.MethodType):
            __builtin__.__import__ = __oldimport__
        global _manager
        del _manager
        _installed = False

if __name__ == '__main__':
    install()
View
541 cheetah/ImportManager.py
@@ -0,0 +1,541 @@
+"""
+Provides an emulator/replacement for Python's standard import system.
+
+@@TR: Be warned that Import Hooks are in the deepest, darkest corner of Python's
+jungle. If you need to start hacking with this, be prepared to get lost for a
+while. Also note, this module predates the newstyle import hooks in Python 2.3
+http://www.python.org/peps/pep-0302.html.
+
+
+This is a hacked/documented version of Gordon McMillan's iu.py. I have:
+
+ - made it a little less terse
+
+ - added docstrings and explanatations
+
+ - standardized the variable naming scheme
+
+ - reorganized the code layout to enhance readability
+
+"""
+
+import sys
+import imp
+import marshal
+
+_installed = False
+
+# _globalOwnerTypes is defined at the bottom of this file
+
+_os_stat = _os_path_join = _os_getcwd = _os_path_dirname = None
+
+##################################################
+## FUNCTIONS
+
def _os_bootstrap():
    """Set up 'os' module replacement functions for use during import bootstrap."""

    # Import the platform-specific module (posix/nt/...) directly rather
    # than 'os', since these helpers must work while the import machinery
    # itself is being replaced.
    names = sys.builtin_module_names

    join = dirname = None
    if 'posix' in names:
        sep = '/'
        from posix import stat, getcwd
    elif 'nt' in names:
        sep = '\\'
        from nt import stat, getcwd
    elif 'dos' in names:
        sep = '\\'
        from dos import stat, getcwd
    elif 'os2' in names:
        sep = '\\'
        from os2 import stat, getcwd
    elif 'mac' in names:
        from mac import stat, getcwd
        # Classic MacOS paths use ':' separators with their own join rules.
        def join(a, b):
            if a == '':
                return b
            if ':' not in a:
                a = ':' + a
            if a[-1:] != ':':
                a = a + ':'
            return a + b
    else:
        raise ImportError('no os specific module found')

    if join is None:
        # Generic join for '/'- and '\\'-separated platforms.
        def join(a, b, sep=sep):
            if a == '':
                return b
            lastchar = a[-1:]
            if lastchar == '/' or lastchar == sep:
                return a + b
            return a + sep + b

    if dirname is None:
        # Everything before the last path separator ('' if there is none).
        def dirname(a, sep=sep):
            for i in range(len(a)-1, -1, -1):
                c = a[i]
                if c == '/' or c == sep:
                    return a[:i]
            return ''

    # Publish the replacements as module globals used throughout this file.
    global _os_stat
    _os_stat = stat

    global _os_path_join
    _os_path_join = join

    global _os_path_dirname
    _os_path_dirname = dirname

    global _os_getcwd
    _os_getcwd = getcwd

_os_bootstrap()
+
def packageName(s):
    """Return the package portion of the dotted name *s*.

    e.g. 'a.b.c' -> 'a.b'; a name with no dot yields ''.
    """
    cut = s.rfind('.')
    if cut == -1:
        return ''
    return s[:cut]
+
def nameSplit(s):
    """Split a dotted name into its components: 'a.b.c' -> ['a', 'b', 'c'].

    A single trailing empty component (from a trailing dot, or from an
    empty input string) is dropped; interior empty components are kept.
    """
    parts = s.split('.')
    if parts and parts[-1] == '':
        parts.pop()
    return parts
+
def getPathExt(fnm):
    """Return fnm's extension including the dot, or '' if there is no dot.

    Note: the whole string is searched, so the last dot anywhere in fnm
    (even before a path separator) starts the 'extension'.
    """
    dot = fnm.rfind('.')
    if dot == -1:
        return ''
    return fnm[dot:]
+
def pathIsDir(pathname):
    """Local replacement for os.path.isdir().

    Returns None (not False) when the path cannot be stat'ed at all.
    """
    try:
        st = _os_stat(pathname)
    except OSError:
        return None
    # Compare the file-type bits of st_mode (index 0 of the stat tuple)
    # against the directory flag; spelled numerically to avoid importing
    # the stat module during bootstrap.
    return (st[0] & 0o170000) == 0o040000
+
def getDescr(fnm):
    """Return the imp.get_suffixes() entry whose suffix matches fnm's
    extension, as a (suffix, mode, type) tuple; None if nothing matches.
    """
    ext = getPathExt(fnm)
    for entry in imp.get_suffixes():
        if entry[0] == ext:
            return entry
+
+##################################################
+## CLASSES
+
class Owner:

    """An Owner does imports from a particular piece of turf.  That is,
    there's an Owner for each thing on sys.path.  There are owners for
    directories and .pyz files.  There could be owners for zip files, or
    even URLs.  A shadowpath (a dictionary mapping the names in sys.path
    to their owners) is used so that sys.path (or a package's __path__)
    is still a bunch of strings.
    """

    def __init__(self, path):
        # The sys.path entry (or equivalent) this owner is responsible for.
        self.path = path

    def __str__(self):
        return self.path

    def getmod(self, nm):
        # Base implementation: this owner cannot supply any module.
        return None
+
class DirOwner(Owner):
    # Owner for a directory on sys.path: resolves a module name to a C
    # extension, package, .py source file or compiled file found there.

    def __init__(self, path):
        # '' means the current working directory (matching sys.path usage).
        if path == '':
            path = _os_getcwd()
        if not pathIsDir(path):
            raise ValueError("%s is not a directory" % path)
        Owner.__init__(self, path)

    def getmod(self, nm,
               getsuffixes=imp.get_suffixes, loadco=marshal.loads, newmod=imp.new_module):
        # Candidate stems: the name itself, plus (tried first) the package
        # __init__ if a subdirectory of that name exists.
        pth = _os_path_join(self.path, nm)

        possibles = [(pth, 0, None)]
        if pathIsDir(pth):
            possibles.insert(0, (_os_path_join(pth, '__init__'), 1, pth))
        py = pyc = None
        for pth, ispkg, pkgpth in possibles:
            for ext, mode, typ in getsuffixes():
                attempt = pth+ext
                try:
                    st = _os_stat(attempt)
                except:
                    pass
                else:
                    if typ == imp.C_EXTENSION:
                        # C extensions are loaded and returned immediately.
                        fp = open(attempt, 'rb')
                        mod = imp.load_module(nm, fp, attempt, (ext, mode, typ))
                        mod.__file__ = attempt
                        return mod
                    elif typ == imp.PY_SOURCE:
                        py = (attempt, st)
                    else:
                        pyc = (attempt, st)
            if py or pyc:
                break
        if py is None and pyc is None:
            return None
        # Prefer the source file when it is newer than the compiled file
        # (st[8] is the mtime slot of the stat tuple); if the .pyc is
        # corrupt, fall back to compiling the source.
        while True:
            if pyc is None or py and pyc[1][8] < py[1][8]:
                try:
                    co = compile(open(py[0], 'r').read()+'\n', py[0], 'exec')
                    break
                except SyntaxError, e:
                    print("Invalid syntax in %s" % py[0])
                    print(e.args)
                    raise
            elif pyc:
                stuff = open(pyc[0], 'rb').read()
                try:
                    # Skip the .pyc header before unmarshalling the code.
                    co = loadco(stuff[8:])
                    break
                except (ValueError, EOFError):
                    pyc = None
            else:
                return None
        mod = newmod(nm)
        mod.__file__ = co.co_filename
        if ispkg:
            # Packages get a __path__ plus a sub-importer hook so their
            # submodules resolve through a PathImportDirector.
            mod.__path__ = [pkgpth]
            subimporter = PathImportDirector(mod.__path__)
            mod.__importsub__ = subimporter.getmod
        # The code object is stashed (not exec'd here); ImportManager runs
        # it after inserting the module into sys.modules.
        mod.__co__ = co
        return mod
+
+
class ImportDirector(Owner):
    """ImportDirectors live on the metapath.  There's one for builtins, one
    for frozen modules, and one for sys.path.  Windows gets one for modules
    gotten from the Registry; Mac would have them for PY_RESOURCE modules
    etc.  A generalization of Owner - their concept of 'turf' is broader.
    """

    pass
+
class BuiltinImportDirector(ImportDirector):
    """Directs imports of builtin modules."""
    def __init__(self):
        self.path = 'Builtins'

    def getmod(self, nm, isbuiltin=imp.is_builtin):
        """Load and return the builtin module *nm*, or None if it isn't one."""
        if not isbuiltin(nm):
            return None
        return imp.load_module(nm, None, nm, ('', '', imp.C_BUILTIN))
+
class FrozenImportDirector(ImportDirector):
    """Directs imports of frozen modules."""

    def __init__(self):
        self.path = 'FrozenModules'

    def getmod(self, nm,
               isFrozen=imp.is_frozen, loadMod=imp.load_module):
        """Load and return the frozen module *nm*, or None if it isn't one."""
        if not isFrozen(nm):
            return None
        module = loadMod(nm, None, nm, ('', '', imp.PY_FROZEN))
        if hasattr(module, '__path__'):
            # Frozen packages get a sub-importer hook so their submodules
            # also resolve through this director.
            module.__importsub__ = (
                lambda name, pname=nm, owner=self: owner.getmod(pname+'.'+name))
        return module
+
+
class RegistryImportDirector(ImportDirector):
    """Directs imports of modules stored in the Windows Registry"""

    def __init__(self):
        # Build a map of module name -> (filename, import descriptor) from
        # HKCU/HKLM Software\Python\PythonCore\<ver>\Modules.  On non-Windows
        # systems (no win32api) the map stays empty and getmod finds nothing.
        self.path = "WindowsRegistry"
        self.map = {}
        try:
            import win32api
            ## import win32con
        except ImportError:
            pass
        else:
            HKEY_CURRENT_USER = -2147483647
            HKEY_LOCAL_MACHINE = -2147483646
            KEY_ALL_ACCESS = 983103
            subkey = r"Software\Python\PythonCore\%s\Modules" % sys.winver
            for root in (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE):
                try:
                    hkey = win32api.RegOpenKeyEx(root, subkey, 0, KEY_ALL_ACCESS)
                except:
                    # Key absent under this root; try the next one.
                    pass
                else:
                    numsubkeys, numvalues, lastmodified = win32api.RegQueryInfoKey(hkey)
                    for i in range(numsubkeys):
                        subkeyname = win32api.RegEnumKey(hkey, i)
                        hskey = win32api.RegOpenKeyEx(hkey, subkeyname, 0, KEY_ALL_ACCESS)
                        val = win32api.RegQueryValueEx(hskey, '')
                        desc = getDescr(val[0])
                        self.map[subkeyname] = (val[0], desc)
                        hskey.Close()
                    hkey.Close()
                    # First root that has the key wins.
                    break

    def getmod(self, nm):
        # Load the registered file for `nm` via imp, or return None.
        stuff = self.map.get(nm)
        if stuff:
            fnm, desc = stuff
            fp = open(fnm, 'rb')
            mod = imp.load_module(nm, fp, fnm, desc)
            mod.__file__ = fnm
            return mod
        return None
+
class PathImportDirector(ImportDirector):
    """Directs imports of modules stored on the filesystem."""

    def __init__(self, pathlist=None, importers=None, ownertypes=None):
        # pathlist -- path entries to search (defaults to sys.path)
        # importers -- optional pre-built mapping of path entry -> Owner
        # ownertypes -- Owner classes to try for each entry (defaults to
        #     the module-level _globalOwnerTypes)
        if pathlist is None:
            self.path = sys.path
        else:
            self.path = pathlist
        if ownertypes == None:
            self._ownertypes = _globalOwnerTypes
        else:
            self._ownertypes = ownertypes
        if importers:
            self._shadowPath = importers
        else:
            self._shadowPath = {}
        self._inMakeOwner = False
        self._building = {}

    def getmod(self, nm):
        # Try each path entry in order.  String entries get an Owner built
        # lazily and cached in the shadow path (-1 marks "not yet tried");
        # non-string entries are assumed to be importer objects themselves.
        mod = None
        for thing in self.path:
            if isinstance(thing, basestring):
                owner = self._shadowPath.get(thing, -1)
                if owner == -1:
                    owner = self._shadowPath[thing] = self._makeOwner(thing)
                if owner:
                    mod = owner.getmod(nm)
            else:
                mod = thing.getmod(nm)
            if mod:
                break
        return mod

    def _makeOwner(self, path):
        # Guard against re-entry while constructing an Owner for the same
        # path (construction can trigger imports, hence recursion).
        if self._building.get(path):
            return None
        self._building[path] = 1
        owner = None
        for klass in self._ownertypes:
            try:
                # this may cause an import, which may cause recursion
                # hence the protection
                owner = klass(path)
            except:
                pass
            else:
                break
        del self._building[path]
        return owner
+
+#=================ImportManager============================#
+# The one-and-only ImportManager
+# ie, the builtin import
+
# Sentinel meaning "this fully-qualified name has not been tried yet"
# (sys.modules stores None for names that were tried and failed).
UNTRIED = -1

class ImportManager:
    # really the equivalent of builtin import
    def __init__(self):
        # The metapath is consulted in order for every absolute import.
        self.metapath = [
            BuiltinImportDirector(),
            FrozenImportDirector(),
            RegistryImportDirector(),
            PathImportDirector()
        ]
        self.threaded = 0
        self.rlock = None
        self.locker = None
        self.setThreaded()

    def setThreaded(self):
        # Switch to thread-safe operation once the 'thread' module has been
        # imported; a no-op before that or if already threaded.
        thread = sys.modules.get('thread', None)
        if thread and not self.threaded:
            self.threaded = 1
            self.rlock = thread.allocate_lock()
            self._get_ident = thread.get_ident

    def install(self):
        # Replace the builtin __import__ and reload with our hooks.
        import __builtin__
        __builtin__.__import__ = self.importHook
        __builtin__.reload = self.reloadHook

    def importHook(self, name, globals=None, locals=None, fromlist=None, level=-1):
        '''
        NOTE: Currently importHook will accept the keyword-argument "level"
        but it will *NOT* use it (currently). Details about the "level" keyword
        argument can be found here: http://www.python.org/doc/2.5.2/lib/built-in-funcs.html
        '''
        # first see if we could be importing a relative name
        #print "importHook(%s, %s, locals, %s)" % (name, globals['__name__'], fromlist)
        _sys_modules_get = sys.modules.get
        contexts = [None]
        if globals:
            importernm = globals.get('__name__', '')
            if importernm:
                if hasattr(_sys_modules_get(importernm), '__path__'):
                    # The importer is itself a package: try relative first.
                    contexts.insert(0, importernm)
                else:
                    pkgnm = packageName(importernm)
                    if pkgnm:
                        contexts.insert(0, pkgnm)
        # so contexts is [pkgnm, None] or just [None]
        # now break the name being imported up so we get:
        # a.b.c -> [a, b, c]
        nmparts = nameSplit(name)
        _self_doimport = self.doimport
        threaded = self.threaded
        for context in contexts:
            ctx = context
            for i in range(len(nmparts)):
                nm = nmparts[i]
                #print " importHook trying %s in %s" % (nm, ctx)
                if ctx:
                    fqname = ctx + '.' + nm
                else:
                    fqname = nm
                if threaded:
                    self._acquire()
                mod = _sys_modules_get(fqname, UNTRIED)
                if mod is UNTRIED:
                    mod = _self_doimport(nm, ctx, fqname)
                if threaded:
                    self._release()
                if mod:
                    ctx = fqname
                else:
                    break
            else:
                # no break, point i beyond end
                i = i + 1
            if i:
                break

        if i<len(nmparts):
            # Could not resolve every component of the dotted name.
            if ctx and hasattr(sys.modules[ctx], nmparts[i]):
                #print "importHook done with %s %s %s (case 1)" % (name, globals['__name__'], fromlist)
                return sys.modules[nmparts[0]]
            del sys.modules[fqname]
            raise ImportError("No module named %s" % fqname)
        if fromlist is None:
            # Plain 'import a.b.c' returns the TOP-level module.
            #print "importHook done with %s %s %s (case 2)" % (name, globals['__name__'], fromlist)
            if context:
                return sys.modules[context+'.'+nmparts[0]]
            return sys.modules[nmparts[0]]
        # 'from a.b import x, *': ensure every requested attribute exists,
        # importing submodules of a package on demand.
        bottommod = sys.modules[ctx]
        if hasattr(bottommod, '__path__'):
            fromlist = list(fromlist)
            i = 0
            while i < len(fromlist):
                nm = fromlist[i]
                if nm == '*':
                    # Expand '*' in place using the package's __all__.
                    fromlist[i:i+1] = list(getattr(bottommod, '__all__', []))
                    if i >= len(fromlist):
                        break
                    nm = fromlist[i]
                i = i + 1
                if not hasattr(bottommod, nm):
                    if self.threaded:
                        self._acquire()
                    mod = self.doimport(nm, ctx, ctx+'.'+nm)
                    if self.threaded:
                        self._release()
                    if not mod:
                        raise ImportError("%s not found in %s" % (nm, ctx))
        #print "importHook done with %s %s %s (case 3)" % (name, globals['__name__'], fromlist)
        return bottommod

    def doimport(self, nm, parentnm, fqname):
        # Import a single (undotted) component `nm`, either as a submodule
        # of parentnm or absolutely through the metapath.
        # Not that nm is NEVER a dotted name at this point
        #print "doimport(%s, %s, %s)" % (nm, parentnm, fqname)
        if parentnm:
            parent = sys.modules[parentnm]
            if hasattr(parent, '__path__'):
                importfunc = getattr(parent, '__importsub__', None)
                if not importfunc:
                    subimporter = PathImportDirector(parent.__path__)
                    importfunc = parent.__importsub__ = subimporter.getmod
                mod = importfunc(nm)
                if mod:
                    setattr(parent, nm, mod)
            else:
                #print "..parent not a package"
                return None
        else:
            # now we're dealing with an absolute import
            for director in self.metapath:
                mod = director.getmod(nm)
                if mod:
                    break
        if mod:
            # Register BEFORE executing the module body (mirrors the real
            # import machinery's handling of circular imports).
            mod.__name__ = fqname
            sys.modules[fqname] = mod
            if hasattr(mod, '__co__'):
                co = mod.__co__
                del mod.__co__
                exec(co, mod.__dict__)
            if fqname == 'thread' and not self.threaded:
##                print "thread detected!"
                self.setThreaded()
        else:
            # Cache the failure so it is not retried (see UNTRIED).
            sys.modules[fqname] = None
        #print "..found %s" % mod
        return mod

    def reloadHook(self, mod):
        # Re-import the module and merge the fresh namespace into the old
        # module object (so existing references see the new definitions).
        fqnm = mod.__name__
        nm = nameSplit(fqnm)[-1]
        parentnm = packageName(fqnm)
        newmod = self.doimport(nm, parentnm, fqnm)
        mod.__dict__.update(newmod.__dict__)
##        return newmod

    def _acquire(self):
        # Re-entrant acquire built on a plain thread lock: the owning
        # thread just bumps a counter instead of blocking on itself.
        if self.rlock.locked():
            if self.locker == self._get_ident():
                self.lockcount = self.lockcount + 1
##                print "_acquire incrementing lockcount to", self.lockcount
                return
        self.rlock.acquire()
        self.locker = self._get_ident()
        self.lockcount = 0
##        print "_acquire first time!"

    def _release(self):
        # Matching release: unwind the counter before freeing the lock.
        if self.lockcount:
            self.lockcount = self.lockcount - 1
##            print "_release decrementing lockcount to", self.lockcount
        else:
            self.rlock.release()
##            print "_release releasing lock!"
+
+
+##################################################
+## MORE CONSTANTS & GLOBALS
+
# Owner classes tried, in order, for each sys.path entry; DirOwner claims
# directories, with the base Owner as the fallback.  ImportHooks prepends
# CheetahDirOwner here when the template import hooks are installed.
_globalOwnerTypes = [
    DirOwner,
    Owner,
]
View
67 cheetah/Macros/I18n.py
@@ -0,0 +1,67 @@
+import gettext
+_ = gettext.gettext
class I18n(object):
    # Stub implementation of an #i18n macro for Cheetah's macro framework.
    def __init__(self, parser):
        # The parser is accepted (per the macro protocol) but not stored;
        # this stub keeps no per-parser state.
        pass

## junk I'm playing with to test the macro framework
#    def parseArgs(self, parser, startPos):
#        parser.getWhiteSpace()
#        args = parser.getExpression(useNameMapper=False,
#                                    pyTokensToBreakAt=[':']).strip()
#        return args
#
#    def convertArgStrToDict(self, args, parser=None, startPos=None):
#        def getArgs(*pargs, **kws):
#            return pargs, kws
#        exec 'positionalArgs, kwArgs = getArgs(%(args)s)'%locals()
#        return kwArgs

    def __call__(self,
                 src, # aka message,
                 plural=None,
                 n=None, # should be a string representing the name of the
                         # '$var' rather than $var itself
                 id=None,
                 domain=None,
                 source=None,
                 target=None,
                 comment=None,

                 # args that are automatically supplied by the parser when the
                 # macro is called:
                 parser=None,
                 macros=None,
                 isShortForm=False,
                 EOLCharsInShortForm=None,
                 startPos=None,
                 endPos=None,
                 ):
        """This is just a stub at this time.

        plural = the plural form of the message
        n = a sized argument to distinguish between single and plural forms

        id = msgid in the translation catalog
        domain = translation domain
        source = source lang
        target = a specific target lang
        comment = a comment to the translation team

        See the following for some ideas
        http://www.zope.org/DevHome/Wikis/DevSite/Projects/ComponentArchitecture/ZPTInternationalizationSupport

        Other notes:
        - There is no need to replicate the i18n:name attribute from plone / PTL,
          as cheetah placeholders serve the same purpose


        """

        #print macros['i18n']
        # Translate via the module-level gettext alias `_`.
        src = _(src)
        # In short form, re-append the EOL chars the parser consumed unless
        # the macro ended exactly at the end of the source.
        if isShortForm and endPos<len(parser):
            return src+EOLCharsInShortForm
        else:
            return src
+
View
1 cheetah/Macros/__init__.py
@@ -0,0 +1 @@
+#
View
2,661 cheetah/Parser.py
2,661 additions, 0 deletions not shown because the diff is too large. Please use a local Git client to view these changes.
View
48 cheetah/Servlet.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+'''
+Provides an abstract Servlet baseclass for Cheetah's Template class
+'''
+
+import sys
+import os.path
+
class Servlet(object):
    """
    This class is an abstract baseclass for Cheetah.Template.Template.
    """

    # Per-request WebKit-style state, reset by sleep().
    transaction = None
    application = None
    request = None
    session = None

    def respond(self, trans=None):
        # Default entry point: a concrete template must override this.
        raise NotImplementedError("""\
couldn't find the template's main method. If you are using #extends
without #implements, try adding '#implements respond' to your template
definition.""")

    def sleep(self, transaction):
        # Clear per-request state at the end of a request.
        # NOTE(review): the super() call assumes a cooperating base class
        # earlier in the MRO that defines sleep() (plain object does not)
        # -- confirm against the WebKit servlet integration.
        super(Servlet, self).sleep(transaction)
        self.session = None
        self.request = None
        self._request = None
        self.response = None
        self.transaction = None

    def shutdown(self):
        # Hook for subclasses; nothing to clean up here.
        pass

    def serverSidePath(self, path=None,
                       normpath=os.path.normpath,
                       abspath=os.path.abspath
                       ):
        # Return an absolute, normalized path for `path` (backslashes
        # converted to forward slashes first), or for the template's own
        # source file (self._filePath) when no path is given; None if
        # neither is available.
        if path:
            return normpath(abspath(path.replace("\\", '/')))
        elif hasattr(self, '_filePath') and self._filePath:
            return normpath(abspath(self._filePath))
        else:
            return None
+
+# vim: shiftwidth=4 tabstop=4 expandtab
View
284 cheetah/SettingsManager.py
@@ -0,0 +1,284 @@
+import sys
+import os.path
+import copy as copyModule
+from ConfigParser import ConfigParser
+import re
+from tokenize import Intnumber, Floatnumber, Number
+import types
+import time
+from StringIO import StringIO # not cStringIO because of unicode support
+import imp # used by SettingsManager.updateSettingsFromPySrcFile()
+
+
# Matches a Python numeric literal, per the tokenize module's grammar.
numberRE = re.compile(Number)
# Loosely matches "(A + B)"-style complex-looking numbers (parens optional).
# NOTE(review): relies on tokenize.Number matching each half -- confirm it
# covers imaginary literals as intended.
complexNumberRE = re.compile('[\(]*' +Number + r'[ \t]*\+[ \t]*' + Number + '[\)]*')
+
+##################################################
+## FUNCTIONS ##
+
def mergeNestedDictionaries(dict1, dict2, copy=False, deepcopy=False):
    """Recursively merge the values of dict2 into dict1 and return dict1.

    Nested dicts are merged key-by-key; any other value in dict2 simply
    overwrites the corresponding value in dict1.

    copy     -- if True, operate on (and return) a shallow copy of dict1
    deepcopy -- if True (and copy is False), operate on a deep copy

    This little function is very handy for selectively overriding settings in
    a settings dictionary that has a nested structure.
    """

    if copy:
        dict1 = copyModule.copy(dict1)
    elif deepcopy:
        dict1 = copyModule.deepcopy(dict1)

    # items() instead of the Python-2-only iteritems() so this runs on both
    # Python 2 and Python 3.
    for key, val in dict2.items():
        if key in dict1 and isinstance(val, dict) and isinstance(dict1[key], dict):
            dict1[key] = mergeNestedDictionaries(dict1[key], val)
        else:
            dict1[key] = val
    return dict1
+
def stringIsNumber(S):
    """Return True if S represents a Python number, False otherwise.

    This also works for complex numbers and numbers with +/- in front.
    """

    S = S.strip()

    if not S:
        # BUGFIX: the original indexed S[0] unconditionally and raised
        # IndexError on empty or whitespace-only input.
        return False

    # Allow a single leading sign (but a bare '+'/'-' is not a number).
    if S[0] in '-+' and len(S) > 1:
        S = S[1:].strip()

    match = complexNumberRE.match(S)
    if not match:
        match = numberRE.match(S)
    # The whole (remaining) string must be consumed by the match.
    return bool(match) and match.end() == len(S)
+
def convStringToNum(theString):
    """Return the Python number that *theString* represents.

    Raises Error when the string does not look like a number (as judged by
    stringIsNumber).
    """
    if stringIsNumber(theString):
        # eval with empty globals/locals so only the literal itself is
        # evaluated (input has already been validated as numeric).
        return eval(theString, {}, {})
    raise Error(theString + ' cannot be converted to a Python number')
+
+
class Error(Exception):
    """Base exception type for this module."""
    pass
+
class NoDefault(object):
    """Sentinel used to distinguish 'no default supplied' from None."""
    pass
+
class ConfigParserCaseSensitive(ConfigParser):
    """A case sensitive version of the standard Python ConfigParser."""

    def optionxform(self, optionstr):
        """Don't change the case as is done in the default implemenation."""
        # The stock ConfigParser lowercases option names; returning the
        # string unchanged preserves the author's casing.
        return optionstr
+
class _SettingsCollector(object):
    """An abstract base class that provides the methods SettingsManager uses to
    collect settings from config files and strings.

    This class only collects settings; it doesn't modify the _settings
    dictionary of SettingsManager instances in any way.
    """

    _ConfigParserClass = ConfigParserCaseSensitive

    def readSettingsFromModule(self, mod, ignoreUnderscored=True):
        """Return a dictionary of all top-level attributes of a Python module.

        Names starting with an underscore are skipped unless
        ignoreUnderscored is False.
        """
        S = {}
        # items() instead of the Python-2-only iteritems() so this works on
        # both Python 2 and Python 3.
        for k, v in vars(mod).items():
            if ignoreUnderscored and k.startswith('_'):
                continue
            S[k] = v
        return S

    def readSettingsFromPySrcStr(self, theString):
        """Return a dictionary of the settings in a Python src string."""
        # SECURITY NOTE: exec runs arbitrary code -- only use with trusted
        # settings sources.
        globalsDict = {'True': (1 == 1),
                       'False': (0 == 1),
                       }
        newSettings = {'self': self}
        exec((theString + os.linesep), globalsDict, newSettings)
        del newSettings['self']
        module = types.ModuleType('temp_settings_module')
        module.__dict__.update(newSettings)
        return self.readSettingsFromModule(module)

    def readSettingsFromConfigFileObj(self, inFile, convert=True):
        """Return the settings from a config file that uses the syntax accepted by
        Python's standard ConfigParser module (like Windows .ini files).

        NOTE:
        this method maintains case unlike the ConfigParser module, unless this
        class was initialized with the 'caseSensitive' keyword set to False.

        All setting values are initially parsed as strings.  However, if the
        'convert' arg is True this method will do the following value
        conversions:

        * all Python numeric literals will be converted from string to number

        * The string 'None' will be converted to the Python value None

        * The string 'True' will be converted to a Python truth value

        * The string 'False' will be converted to a Python false value

        * Any string starting with 'python:' will be treated as a Python literal
          or expression that needs to be eval'd.  This approach is useful for
          declaring lists and dictionaries.

        If a config section titled 'Globals' is present the options defined
        under it will be treated as top-level settings.
        """

        p = self._ConfigParserClass()
        p.readfp(inFile)
        newSettings = {}

        # (The original duplicated the section-scan setup statements; the
        # duplicates have been removed.)
        for s in p.sections():
            newSettings[s] = {}
            for o in p.options(s):
                if o != '__name__':
                    newSettings[s][o] = p.get(s, o)

        ## loop through new settings -> deal with global settings, numbers,
        ## booleans and None ++ also deal with 'importSettings' commands
        # list(...) snapshots are required because both dicts are mutated
        # while looping (py3 raises RuntimeError on live-view mutation).
        for sect, subDict in list(newSettings.items()):
            for key, val in list(subDict.items()):
                if convert:
                    # The branches are mutually exclusive on any given val,
                    # so elif preserves the original chain's behaviour.
                    if val.lower().startswith('python:'):
                        # SECURITY NOTE: eval of config-supplied text; only
                        # load config from trusted sources.
                        subDict[key] = eval(val[7:], {}, {})
                    elif val.lower() == 'none':
                        subDict[key] = None
                    elif val.lower() == 'true':
                        subDict[key] = True
                    elif val.lower() == 'false':
                        subDict[key] = False
                    elif stringIsNumber(val):
                        subDict[key] = convStringToNum(val)

                ## now deal with any 'importSettings' commands
                if key.lower() == 'importsettings':
                    if val.find(';') < 0:
                        importedSettings = self.readSettingsFromPySrcFile(val)
                    else:
                        path = val.split(';')[0]
                        rest = ''.join(val.split(';')[1:]).strip()
                        parentDict = self.readSettingsFromPySrcFile(path)
                        # SECURITY NOTE: eval of a config-supplied key
                        # expression (kept for backward compatibility).
                        importedSettings = eval('parentDict["' + rest + '"]')

                    subDict.update(mergeNestedDictionaries(subDict,
                                                           importedSettings))

            # Hoist a [Globals] section's options to the top level.
            if sect.lower() == 'globals':
                newSettings.update(newSettings[sect])
                del newSettings[sect]

        return newSettings
+
+
class SettingsManager(_SettingsCollector):
    """A mixin class that provides facilities for managing application settings.

    SettingsManager is designed to work well with nested settings dictionaries
    of any depth.
    """

    def __init__(self):
        super(SettingsManager, self).__init__()
        self._settings = {}
        self._initializeSettings()

    def _defaultSettings(self):
        # Hook for subclasses: return the baseline settings dictionary.
        return {}

    def _initializeSettings(self):
        """A hook that allows for complex setting initialization sequences that
        involve references to 'self' or other settings.  For example:
              self._settings['myCalcVal'] = self._settings['someVal'] * 15
        This method should be called by the class' __init__() method when needed.
        The dummy implementation should be reimplemented by subclasses.
        """

        pass

    ## core post startup methods

    def setting(self, name, default=NoDefault):
        """Get a setting from self._settings, with or without a default value.

        With no default, a missing name raises KeyError (NoDefault is the
        sentinel distinguishing 'no default' from default=None).
        """

        if default is NoDefault:
            return self._settings[name]
        else:
            return self._settings.get(name, default)


    def hasSetting(self, key):
        """True/False"""
        return key in self._settings

    def setSetting(self, name, value):
        """Set a setting in self._settings."""
        self._settings[name] = value

    def settings(self):
        """Return a reference to the settings dictionary"""
        return self._settings

    def copySettings(self):
        """Returns a shallow copy of the settings dictionary"""
        return copyModule.copy(self._settings)

    def deepcopySettings(self):
        """Returns a deep copy of the settings dictionary"""
        return copyModule.deepcopy(self._settings)

    def updateSettings(self, newSettings, merge=True):
        """Update the settings with a selective merge or a complete overwrite."""

        if merge:
            # Recursive merge preserves entries in nested sub-dicts.
            mergeNestedDictionaries(self._settings, newSettings)
        else:
            self._settings.update(newSettings)


    ## source specific update methods

    def updateSettingsFromPySrcStr(self, theString, merge=True):
        """Update the settings from a code in a Python src string."""

        newSettings = self.readSettingsFromPySrcStr(theString)
        # A 'mergeSettings' key in the new settings overrides the merge arg.
        self.updateSettings(newSettings,
                            merge=newSettings.get('mergeSettings', merge) )


    def updateSettingsFromConfigFileObj(self, inFile, convert=True, merge=True):
        """See the docstring for .updateSettingsFromConfigFile()

        The caller of this method is responsible for closing the inFile file
        object."""

        newSettings = self.readSettingsFromConfigFileObj(inFile, convert=convert)
        self.updateSettings(newSettings,
                            merge=newSettings.get('mergeSettings', merge))

    def updateSettingsFromConfigStr(self, configStr, convert=True, merge=True):
        """See the docstring for .updateSettingsFromConfigFile()
        """

        # Prepend a [globals] header so bare top-level options parse as
        # top-level settings (see readSettingsFromConfigFileObj).
        configStr = '[globals]\n' + configStr
        inFile = StringIO(configStr)
        newSettings = self.readSettingsFromConfigFileObj(inFile, convert=convert)
        self.updateSettings(newSettings,
                            merge=newSettings.get('mergeSettings', merge))
+
View
267 cheetah/SourceReader.py
@@ -0,0 +1,267 @@
+"""SourceReader class for Cheetah's Parser and CodeGenerator
+"""
+import re
+import sys
+
+EOLre = re.compile(r'[ \f\t]*(?:\r\n|\r|\n)')
+EOLZre = re.compile(r'(?:\r\n|\r|\n|\Z)')
+ENCODINGsearch = re.compile("coding[=:]\s*([-\w.]+)").search
+
class Error(Exception):
    """Base exception type for this module."""
    pass
+
+class SourceReader(object):
    def __init__(self, src, filename=None, breakPoint=None, encoding=None):
        # src -- the source string to read
        # filename -- optional name used for reporting
        # breakPoint -- optional cap on how far reads may go; defaults to
        #     len(src)
        # encoding -- accepted but not used in this constructor
        self._src = src
        self._filename = filename
        self._srcLen = len(src)
        if breakPoint == None:
            self._breakPoint = self._srcLen
        else:
            self.setBreakPoint(breakPoint)
        self._pos = 0
        self._bookmarks = {}
        self._posTobookmarkMap = {}

        ## collect some meta-information
        # Record the start position of every end-of-line.  EOLZre also
        # matches end-of-string, so the final line is always captured.
        self._EOLs = []
        pos = 0
        while pos < len(self):
            EOLmatch = EOLZre.search(src, pos)
            self._EOLs.append(EOLmatch.start())
            pos = EOLmatch.end()

        # Matching beginning-of-line position for each recorded EOL.
        self._BOLs = []
        for pos in self._EOLs:
            BOLpos = self.findBOL(pos)
            self._BOLs.append(BOLpos)
+
    def src(self):
        """Return the full source string."""
        return self._src
+
    def filename(self):
        """Return the filename given at construction (may be None)."""
        return self._filename
+
    def __len__(self):
        # The effective length is the breakpoint, not the raw source length.
        return self._breakPoint
+
    def __getitem__(self, i):
        # Validate the index (or a slice's stop bound) against the
        # breakpoint before delegating to the underlying source string.
        if not isinstance(i, int):
            self.checkPos(i.stop)
        else:
            self.checkPos(i)
        return self._src[i]
+
    def __getslice__(self, i, j):
        # Python 2 only (never called on Python 3).  Negative bounds are
        # clamped to 0 rather than wrapping from the end.
        i = max(i, 0); j = max(j, 0)
        return self._src[i:j]
+
    def splitlines(self):
        # Lazily split the source into lines (newlines stripped) and cache
        # the result for subsequent calls.
        if not hasattr(self, '_srcLines'):
            self._srcLines = self._src.splitlines()
        return self._srcLines
+
    def lineNum(self, pos=None):
        # Return the 0-based line number containing pos (defaults to the
        # current position).  Falls through (returning None) if pos lies
        # outside every recorded [BOL, EOL] span.
        if pos == None:
            pos = self._pos

        for i in range(len(self._BOLs)):
            if pos >= self._BOLs[i] and pos <= self._EOLs[i]:
                return i
+
    def getRowCol(self, pos=None):
        # Return (row, col) for pos, both 1-based; pos defaults to the
        # current position.
        if pos == None:
            pos = self._pos
        lineNum = self.lineNum(pos)
        BOL, EOL = self._BOLs[lineNum], self._EOLs[lineNum]
        return lineNum+1, pos-BOL+1
+
    def getRowColLine(self, pos=None):
        # Return (row, col, lineText) for pos (defaults to the current
        # position); row and col are 1-based.
        if pos == None:
            pos = self._pos
        row, col = self.getRowCol(pos)
        return row, col, self.splitlines()[row-1]
+
+ def getLine(self, pos):
+ if pos == None:
+ pos = self._pos
+ lineNum = self.lineNum(pos)
+ return self.splitlines()[lineNum]
+
    def pos(self):
        """Return the current read position."""
        return self._pos
+
    def setPos(self, pos):
        # Validate against the breakpoint before moving the cursor.
        self.checkPos(pos)
        self._pos = pos
+
+
    def validPos(self, pos):
        # True if pos lies within [0, breakPoint] (inclusive at both ends).
        return pos <= self._breakPoint and pos >=0
+
+ def checkPos(self, pos):
+ if not pos <= self._breakPoint: