print should now be used as print()
benoitc committed Apr 20, 2012
1 parent 0459948 commit 985c6c6
Showing 10 changed files with 82 additions and 78 deletions.
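The change is mostly mechanical, but the compatibility story is worth spelling out. In Python 2, print is a statement; Python 3 makes it a built-in function. A single-argument call such as print("x") behaves identically under both, because on Python 2 the parentheses simply group one expression for the old statement. Multi-argument calls only behave consistently after from __future__ import print_function (available since Python 2.6). A minimal sketch, not part of the commit:

from __future__ import print_function  # opt in to the Python 3 behaviour on 2.6+

# Single argument: identical on 2 and 3 even without the future import,
# since the parentheses just group one expression for the old statement.
print("Page: %s" % "index.rst")

# Several arguments: without the future import, Python 2 would print
# the tuple ('Updating', 'css...') instead of two space-separated words.
print("Updating", "css...")

# Blank line: print("") works on both; a bare print() on Python 2
# without the future import would print an empty tuple, "()".
print("")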
35 changes: 18 additions & 17 deletions doc/buildweb.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -
#
- # This file is part of gunicorn released under the MIT license.
+ # This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

from __future__ import with_statement
@@ -21,7 +21,7 @@
from sitemap_gen import CreateSitemapFromFile
import conf

- class Site(object):
+ class Site(object):
def __init__(self):
self.url = conf.SITE_URL.rstrip('/')

@@ -47,22 +47,23 @@ def process(self, files, curr_path, tgt_path):
if not page.needed():
continue

print "Page: %s" % page.source
print("Page: %s" % page.source)
page.write()

def sass_compile(self):
print ""
print "Updating css..."
print("")
print("Updating css...")
try:
sp.check_call(["compass", "compile", "--boring"])
except sp.CalledProcessError:
print "Failed to update CSS"

sys.stderr.write("Failed to update CSS")
sys.stderr.flush()

def get_template(self, name):
return self.env.get_template(name)

class Page(object):

def __init__(self, site, filename, curr_path, tgt_path):
self.site = site
self.filename = filename
@@ -72,7 +73,7 @@ def __init__(self, site, filename, curr_path, tgt_path):

with open(self.source, 'Ur') as handle:
raw = handle.read()

try:
headers, body = raw.split("\n\n", 1)
except ValueError:
@@ -97,7 +98,7 @@ def __init__(self, site, filename, curr_path, tgt_path):

newext = self.headers.get('ext', '.html')
self.target = os.path.join(tgt_path, "%s%s" % (basename, newext))

def url(self):
path = self.target.split(conf.OUTPUT_PATH)[1].lstrip('/')
return "/".join([self.site.url, path])
@@ -106,10 +107,10 @@ def needed(self):
for f in "force --force -f".split():
if f in sys.argv[1:]:
return True

if not os.path.exists(self.target):
return True

smtime = os.stat(self.source).st_mtime
tmtime = os.stat(self.target).st_mtime
return smtime > tmtime
@@ -141,7 +142,7 @@ def convert_rst(self, body):
settings_overrides=overrides
)
lines = parts['html_body'].splitlines()

toppos, botpos = None, None
for idx, line in enumerate(lines):
if line.find("_TOC_TOP_") >= 0:
@@ -174,13 +175,13 @@ def fmt_setting(self, s):
val = " ::\n\n" + val
else:
val = "``%s``" % s.default

if s.cli and s.meta:
args = ["%s %s" % (arg, s.meta) for arg in s.cli]
cli = ', '.join(args)
elif s.cli:
cli = ", ".join(s.cli)

out = []
out.append("%s" % s.name)
out.append("~" * len(s.name))
@@ -196,6 +197,6 @@ def fmt_setting(self, s):

def main():
Site().render()

if __name__ == "__main__":
main()
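One hunk above goes beyond reparenthesizing: the CSS failure message moves from stdout to stderr, with an explicit flush() so it shows up immediately whatever the stream's buffering. A small standalone sketch of the same pattern (with an added trailing newline, since unlike print, write() does not append one):

import subprocess as sp
import sys

try:
    sp.check_call(["compass", "compile", "--boring"])
except sp.CalledProcessError:
    # Diagnostics belong on stderr so they don't mix with the page
    # output on stdout; flush() pushes the message out right away.
    sys.stderr.write("Failed to update CSS\n")
    sys.stderr.flush()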
68 changes: 34 additions & 34 deletions doc/sitemap_gen.py
@@ -55,8 +55,8 @@
# entire file has been parsed.
import sys
if sys.hexversion < 0x02020000:
- print 'This script requires Python 2.2 or later.'
- print 'Currently run with version: %s' % sys.version
+ print('This script requires Python 2.2 or later.')
+ print('Currently run with version: %s' % sys.version)
sys.exit(1)

import fnmatch
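The guard above compares sys.hexversion, which packs (major, minor, micro, releaselevel, serial) into a single integer: 2.2.0 final is 0x020200f0, so anything below 0x02020000 is older than every 2.2 release. A sketch of an equivalent, more readable spelling (sys.version_info has existed since Python 2.0):

import sys

if sys.version_info < (2, 2):
    # Tuple comparison: (2, 1, 3, ...) < (2, 2) is True, so this
    # rejects the same interpreters as the hexversion test.
    print('This script requires Python 2.2 or later.')
    print('Currently run with version: %s' % sys.version)
    sys.exit(1)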
@@ -291,7 +291,7 @@ def NarrowText(self, text, encoding):
# Something is seriously wrong if we get to here
return text.encode(ENC_ASCII, 'ignore')
#end def NarrowText

def MaybeNarrowPath(self, text):
""" Paths may be allowed to stay wide """
if self._widefiles:
@@ -381,7 +381,7 @@ def Log(self, text, level):
if text:
text = encoder.NarrowText(text, None)
if self._verbose >= level:
- print text
+ print(text)
#end def Log

def Warn(self, text):
@@ -391,7 +391,7 @@ def Warn(self, text):
hash = hashlib.md5(text).hexdigest()
if not self._warns_shown.has_key(hash):
self._warns_shown[hash] = 1
- print '[WARNING] ' + text
+ print('[WARNING] ' + text)
else:
self.Log('(suppressed) [WARNING] ' + text, 3)
self.num_warns = self.num_warns + 1
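A side note on the surrounding code rather than on the diff itself: the deduplication here relies on dict.has_key(), which was removed in Python 3; the in operator is the spelling that works on both. A sketch of the same suppress-repeats logic, with hypothetical names:

import hashlib

_warns_shown = {}

def warn_once(text):
    # `key in d` replaces d.has_key(key), which is gone in Python 3.
    # md5() needs bytes on Python 3, hence the explicit encode().
    digest = hashlib.md5(text.encode('utf-8')).hexdigest()
    if digest not in _warns_shown:
        _warns_shown[digest] = 1
        print('[WARNING] ' + text)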
@@ -404,7 +404,7 @@ def Error(self, text):
hash = hashlib.md5(text).hexdigest()
if not self._errors_shown.has_key(hash):
self._errors_shown[hash] = 1
- print '[ERROR] ' + text
+ print('[ERROR] ' + text)
else:
self.Log('(suppressed) [ERROR] ' + text, 3)
self.num_errors = self.num_errors + 1
@@ -414,9 +414,9 @@ def Fatal(self, text):
""" Output an error and terminate the program. """
if text:
text = encoder.NarrowText(text, None)
- print '[FATAL] ' + text
+ print('[FATAL] ' + text)
else:
- print 'Fatal error.'
+ print('Fatal error.')
sys.exit(1)
#end def Fatal

@@ -484,7 +484,7 @@ def Canonicalize(loc):
""" Do encoding and canonicalization on a URL string """
if not loc:
return loc

# Let the encoder try to narrow it
narrow = encoder.NarrowText(loc, None)

@@ -545,7 +545,7 @@ def Validate(self, base_url, allow_fragment):
def Validate(self, base_url, allow_fragment):
""" Verify the data in this URL is well-formed, and override if not. """
assert type(base_url) == types.StringType

# Test (and normalize) the ref
if not self.loc:
output.Warn('Empty URL')
@@ -611,7 +611,7 @@ def Log(self, prefix='URL', level=3):
def Log(self, prefix='URL', level=3):
""" Dump the contents, empty or not, to the log. """
out = prefix + ':'

for attribute in self.__slots__:
value = getattr(self, attribute)
if not value:
@@ -636,7 +636,7 @@ def WriteXML(self, file):
value = str(value)
value = xml.sax.saxutils.escape(value)
out = out + (' <%s>%s</%s>\n' % (attribute, value, attribute))

out = out + SITEURL_XML_SUFFIX
file.write(out)
#end def WriteXML
@@ -709,7 +709,7 @@ def Apply(self, url):
""" Process the URL, as above. """
if (not url) or (not url.loc):
return None

if self._wildcard:
if fnmatch.fnmatchcase(url.loc, self._wildcard):
return self._pass
@@ -738,7 +738,7 @@ def __init__(self, attributes):
if not ValidateAttributes('URL', attributes,
('href', 'lastmod', 'changefreq', 'priority')):
return

url = URL()
for attr in attributes.keys():
if attr == 'href':
@@ -749,7 +749,7 @@ def __init__(self, attributes):
if not url.loc:
output.Error('Url entries must have an href attribute.')
return

self._url = url
output.Log('Input: From URL "%s"' % self._url.loc, 2)
#end def __init__
@@ -775,7 +775,7 @@ def __init__(self, attributes):

if not ValidateAttributes('URLLIST', attributes, ('path', 'encoding')):
return

self._path = attributes.get('path')
self._encoding = attributes.get('encoding', ENC_UTF8)
if self._path:
@@ -808,7 +808,7 @@ def ProduceURLs(self, consumer):
line = line.strip()
if (not line) or line[0] == '#':
continue

# Split the line on space
url = URL()
cols = line.split(' ')
@@ -1156,7 +1156,7 @@ class InputSitemap(xml.sax.handler.ContentHandler):
"""

class _ContextBase(object):

"""Base class for context handlers in our SAX processing. A context
handler is a class that is responsible for understanding one level of
depth in the XML schema. The class knows what sub-tags are allowed,
@@ -1165,7 +1165,7 @@ class _ContextBase(object):
This base class is the API filled in by specific context handlers,
all defined below.
"""

def __init__(self, subtags):
"""Initialize with a sequence of the sub-tags that would be valid in
this context."""
@@ -1209,18 +1209,18 @@ def Return(self, result):
#end class _ContextBase

class _ContextUrlSet(_ContextBase):

"""Context handler for the document node in a Sitemap."""

def __init__(self):
InputSitemap._ContextBase.__init__(self, ('url',))
#end def __init__
#end class _ContextUrlSet

class _ContextUrl(_ContextBase):

"""Context handler for a URL node in a Sitemap."""

def __init__(self, consumer):
"""Initialize this context handler with the callable consumer that
wants our URLs."""
@@ -1241,7 +1241,7 @@ def Close(self):
self._consumer(self._url, False)
self._url = None
#end def Close

def Return(self, result):
"""A value context has closed, absorb the data it gave us."""
assert self._url
@@ -1251,9 +1251,9 @@ def Return(self, result):
#end class _ContextUrl

class _ContextSitemapIndex(_ContextBase):

"""Context handler for the document node in an index file."""

def __init__(self):
InputSitemap._ContextBase.__init__(self, ('sitemap',))
self._loclist = [] # List of accumulated Sitemap URLs
@@ -1271,7 +1271,7 @@ def Close(self):
self._loclist = []
return temp
#end def Close

def Return(self, result):
"""Getting a new loc URL, add it to the collection."""
if result:
@@ -1280,9 +1280,9 @@ def Return(self, result):
#end class _ContextSitemapIndex

class _ContextSitemap(_ContextBase):

"""Context handler for a Sitemap entry in an index file."""

def __init__(self):
InputSitemap._ContextBase.__init__(self, ('loc', 'lastmod'))
self._loc = None # The URL to the Sitemap
@@ -1310,10 +1310,10 @@ def Return(self, result):
#end class _ContextSitemap

class _ContextValue(_ContextBase):

"""Context handler for a single value. We return just the value. The
higher level context has to remember what tag led into us."""

def __init__(self):
InputSitemap._ContextBase.__init__(self, ())
self._text = None
@@ -1355,7 +1355,7 @@ def __init__(self, attributes):

if not ValidateAttributes('SITEMAP', attributes, ['path']):
return

# Init the first file path
path = attributes.get('path')
if path:
@@ -1388,7 +1388,7 @@ def ProduceURLs(self, consumer):
self._contexts_idx = [InputSitemap._ContextSitemapIndex(),
InputSitemap._ContextSitemap(),
InputSitemap._ContextValue()]

self._contexts_stm = [InputSitemap._ContextUrlSet(),
InputSitemap._ContextUrl(consumer),
InputSitemap._ContextValue()]
@@ -1408,7 +1408,7 @@ def ProduceURLs(self, consumer):
def _ProcessFile(self, path):
"""Do per-file reading/parsing/consuming for the file path passed in."""
assert path

# Open our file
(frame, file) = OpenFileForRead(path, 'SITEMAP')
if not file: