Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

use django-fancy-cache

  • Loading branch information...
commit d5b3cbcf094b0ff16bb79c692e53db556060699d 1 parent c591eb1
@peterbe authored
View
55 apps/homepage/base64allimages.py
@@ -0,0 +1,55 @@
+import re
+import logging
+import base64
+import urlparse
+import urllib
+from django.contrib.sites.models import RequestSite
+from django.conf import settings
+
+
+_img_regex = re.compile('(<img.*?src=(["\'])([^"\']+)(["\']).*?>)', re.DOTALL | re.M)
+
+
def post_process_response(response, request):
    """Inline same-domain <img> sources as base64 ``data:`` URIs.

    For every <img> tag in the HTML response, downloads the image and
    replaces its ``src`` with a ``data:<content-type>;base64,...`` URI,
    keeping the original URL in a ``data-orig-src`` attribute.  Images on
    foreign domains (unless the URL contains ``settings.STATIC_URL``) and
    images that fail to download are left untouched.

    :param response: the Django HttpResponse whose ``content`` is rewritten
    :param request: the current request (used for scheme, domain and path)
    :returns: the same response object, with ``content`` rewritten in place
    """
    base_url = 'https://' if request.is_secure() else 'http://'
    base_url += RequestSite(request).domain
    current_url = urlparse.urljoin(base_url, request.path)
    this_domain = urlparse.urlparse(current_url).netloc

    def image_replacer(match):
        bail = match.group()
        whole, open_deli, src, close_deli = match.groups()
        if src.startswith('//'):
            # Protocol-relative URL: pin it to the request's scheme.
            abs_src = ('https:' if request.is_secure() else 'http:') + src
        else:
            abs_src = urlparse.urljoin(current_url, src)
        if urlparse.urlparse(abs_src).netloc != this_domain:
            # Foreign host: only fetch if it's clearly one of our own
            # static files; otherwise leave the tag alone.
            if not (settings.STATIC_URL and settings.STATIC_URL in abs_src):
                return bail

        try:
            img_response = urllib.urlopen(abs_src)
        except IOError:
            # Network failure must not take the whole page down; keep the
            # original tag and log for later investigation.
            logging.warning("Unable to download %s", abs_src, exc_info=True)
            return bail
        if img_response.getcode() >= 300:
            logging.warning(
                "Unable to download %s (code: %s)",
                abs_src, img_response.getcode()
            )
            return bail
        ct = img_response.headers['content-type']

        img_content = img_response.read()
        new_src = (
            'data:%s;base64,%s' %
            (ct, base64.encodestring(img_content).replace('\n', ''))
        )
        # Rebuild with the delimiters that were actually matched so tags
        # using mismatched opening/closing quotes are still replaced.
        old_src = 'src=%s%s%s' % (open_deli, src, close_deli)
        new_src = 'src=%s%s%s' % (open_deli, new_src, close_deli)
        new_src += ' data-orig-src=%s%s%s' % (open_deli, src, close_deli)
        return bail.replace(old_src, new_src)

    response.content = _img_regex.sub(image_replacer, response.content)
    return response
View
151 apps/homepage/views.py
@@ -20,7 +20,8 @@
from apps.redisutils import get_redis_connection
from apps.rediscounter import redis_increment
from .utils import (parse_ocs_to_categories, make_categories_q, split_search)
-from apps.view_cache_utils import cache_page_with_prefix
+from fancy_cache import cache_page
+from apps.mincss_response import mincss_response
def _home_key_prefixer(request):
@@ -45,7 +46,7 @@ def _home_key_prefixer(request):
return prefix
-@cache_page_with_prefix(60 * 60, _home_key_prefixer)
+@cache_page(60 * 60, key_prefix=_home_key_prefixer)
def home(request, oc=None):
data = {}
qs = BlogItem.objects.filter(pub_date__lt=utc_now())
@@ -322,156 +323,24 @@ def append_queryset_search(queryset, order_by, words, model_name):
return render(request, 'homepage/search.html', data)
-_img_regex = re.compile('(<img.*?src=(["\'])([^"\']+)(["\']).*?>)', re.DOTALL | re.M)
-import logging
-import base64
-import urlparse
-import urllib
-from django.contrib.sites.models import RequestSite
-from django.conf import settings
+from .base64allimages import post_process_response as b64_post_process_response
-def post_process_response(response, request):
- current_url = request.build_absolute_uri().split('?')[0]
- base_url = 'https://' if request.is_secure() else 'http://'
- base_url += RequestSite(request).domain
- current_url = urlparse.urljoin(base_url, request.path)
- this_domain = urlparse.urlparse(current_url).netloc
- def image_replacer(match):
- bail = match.group()
- whole, deli, src, deli = match.groups()
- if src.startswith('//'):
- if request.is_secure():
- abs_src = 'https:' + src
- else:
- abs_src = 'http:' + src
- else:
- abs_src = urlparse.urljoin(current_url, src)
- if urlparse.urlparse(abs_src).netloc != this_domain:
- if settings.STATIC_URL and settings.STATIC_URL in abs_src:
- pass
- else:
- return bail
-
- img_response = urllib.urlopen(abs_src)
- ct = img_response.headers['content-type']
- if img_response.getcode() >= 300:
- logging.warning(
- "Unable to download %s (code: %s)",
- abs_src, img_response.getcode()
- )
- return bail
-
- img_content = img_response.read()
- new_src = (
- 'data:%s;base64,%s' %
- (ct, base64.encodestring(img_content).replace('\n', ''))
- )
- old_src = 'src=%s%s%s' % (deli, src, deli)
- new_src = 'src=%s%s%s' % (deli, new_src, deli)
- new_src += ' data-orig-src=%s%s%s' % (deli, src, deli)
- return bail.replace(old_src, new_src)
-
- response.content = _img_regex.sub(image_replacer, response.content)
- return response
-
-def _aboutprefixer(request):
- return '1'
-#@cache_page(60 * 60 * 1)
-@cache_page_with_prefix(60 * 60, _aboutprefixer, post_process_response=post_process_response)
+@cache_page(60 * 60, post_process_response=b64_post_process_response)
def about2(request):
return render(request, 'homepage/about.html')
-@cache_page(60 * 60 * 1)
+
+@cache_page(60 * 60, post_process_response=mincss_response)
def about(request):
return render(request, 'homepage/about.html')
-try:
- from mincss.processor import Processor
- try:
- import cssmin
- except ImportError:
- logging.warning("Unable to import cssmin", exc_info=True)
- cssmin = None
-except ImportError:
- logging.warning("Unable to import mincss", exc_info=True)
- Processor = None
-
-
-_style_regex = re.compile('<style.*?</style>', re.M | re.DOTALL)
-_link_regex = re.compile('<link.*?>', re.M | re.DOTALL)
-
-def _mincss_response(response, request):
- if Processor is None or cssmin is None:
- logging.info("No _mincss_response() possible")
- return response
-
- html = response.content
- p = Processor()
- p.process_html(html, request.build_absolute_uri())
- p.process()
- combined_css = []
- _total_before = 0
- _requests_before = 0
- for link in p.links:
- _total_before += len(link.before)
- _requests_before += 1
- #combined_css.append('/* %s */' % link.href)
- combined_css.append(link.after)
-
- for inline in p.inlines:
- _total_before += len(inline.before)
- combined_css.append(inline.after)
-
- if p.inlines:
- html = _style_regex.sub('', html)
- found_link_hrefs = [x.href for x in p.links]
- def link_remover(m):
- bail = m.group()
- for each in found_link_hrefs:
- if each in bail:
- return ''
- return bail
- html = _link_regex.sub(link_remover, html)
-
- _total_after = sum(len(x) for x in combined_css)
- combined_css = [cssmin.cssmin(x) for x in combined_css]
- _total_after_min = sum(len(x) for x in combined_css)
-
- stats_css = (
-"""
-/*
-Stats about using mincss
-------------------------
-Requests: %s (now: 0)
-Before: %.fKb
-After: %.fKb
-After (minified): %.fKb
-Saving: %.fKb
-*/"""
- % (_requests_before,
- _total_before / 1024.,
- _total_after / 1024.,
- _total_after_min / 1024.,
- (_total_before - _total_after) / 1024.)
-
- )
- combined_css.insert(0, stats_css)
- new_style = (
- '<style type="text/css">\n%s\n</style>' %
- ('\n'.join(combined_css)).strip()
- )
- html = html.replace(
- '</head>',
- new_style + '\n</head>'
- )
- response.content = html
- return response
-@cache_page_with_prefix(60 * 60, _aboutprefixer, post_process_response=_mincss_response)
+@cache_page(60 * 60, post_process_response=mincss_response)
def about3(request):
return render(request, 'homepage/about.html')
-@cache_page(60 * 60 * 24)
+
+@cache_page(60 * 60 * 24, post_process_response=mincss_response)
def contact(request):
return render(request, 'homepage/contact.html')
View
80 apps/mincss_response.py
@@ -0,0 +1,80 @@
import re
import logging

from mincss.processor import Processor

try:
    import cssmin
except ImportError:
    logging.warning("Unable to import cssmin", exc_info=True)
    cssmin = None
+
+_style_regex = re.compile('<style.*?</style>', re.M | re.DOTALL)
+_link_regex = re.compile('<link.*?>', re.M | re.DOTALL)
+
+
def mincss_response(response, request):
    """Rewrite an HTML response so it only ships the CSS it actually uses.

    Runs mincss over the response body, removes the <style> and
    <link rel=stylesheet> tags it analyzed, and injects one combined,
    cssmin-minified <style> block (prefixed with a stats comment) right
    before </head>.  Returns the (possibly modified) response.
    """
    # cssmin is genuinely optional (see the try/except at import time).
    # NOTE(review): Processor is imported unconditionally in this module,
    # so it can never be None here — this guard looks vestigial; confirm.
    if Processor is None or cssmin is None:
        logging.info("No mincss_response() possible")
        return response

    # Decode once up front; all the regex work happens on unicode text
    # and the result is re-encoded at the end.
    html = unicode(response.content, 'utf-8')
    p = Processor()
    p.process_html(html, request.build_absolute_uri())
    p.process()
    combined_css = []
    _total_before = 0      # characters of CSS before mincss ran
    _requests_before = 0   # number of <link> stylesheet requests replaced
    for link in p.links:
        _total_before += len(link.before)
        _requests_before += 1
        #combined_css.append('/* %s */' % link.href)
        combined_css.append(link.after)

    for inline in p.inlines:
        _total_before += len(inline.before)
        combined_css.append(inline.after)

    # Strip the inline <style> blocks we are about to re-inject; <link>
    # tags are stripped below regardless, but only those mincss processed.
    if p.inlines:
        html = _style_regex.sub('', html)
    found_link_hrefs = [x.href for x in p.links]

    def link_remover(m):
        # Drop a <link> tag only if its href was one mincss analyzed;
        # any other <link> (icons, feeds, ...) is kept as-is.
        bail = m.group()
        for each in found_link_hrefs:
            if each in bail:
                return ''
        return bail

    html = _link_regex.sub(link_remover, html)

    # Measure before minifying so the stats show both stages.
    _total_after = sum(len(x) for x in combined_css)
    combined_css = [cssmin.cssmin(x) for x in combined_css]
    _total_after_min = sum(len(x) for x in combined_css)

    # Human-readable savings report, shipped as a CSS comment at the top
    # of the combined style block.
    stats_css = (
"""
/*
Stats about using github.com/peterbe/mincss
-------------------------------------------
Requests: %s (now: 0)
Before: %.fKb
After: %.fKb
After (minified): %.fKb
Saving: %.fKb
*/"""
        % (_requests_before,
           _total_before / 1024.,
           _total_after / 1024.,
           _total_after_min / 1024.,
           (_total_before - _total_after) / 1024.)

    )
    combined_css.insert(0, stats_css)
    new_style = (
        '<style type="text/css">\n%s\n</style>' %
        ('\n'.join(combined_css)).strip()
    )
    # Only the first </head> is replaced; a well-formed page has one.
    html = html.replace(
        '</head>',
        new_style + '\n</head>'
    )
    response.content = html.encode('utf-8')
    return response
View
24 apps/plog/views.py
@@ -30,7 +30,8 @@
from .utils import render_comment_text, valid_email, utc_now
from apps.redisutils import get_redis_connection
from apps.rediscounter import redis_increment
-from apps.view_cache_utils import cache_page_with_prefix
+from fancy_cache import cache_page
+from apps.mincss_response import mincss_response
from . import tasks
from . import utils
from .forms import BlogForm, BlogFileUpload
@@ -96,7 +97,11 @@ def _blog_post_key_prefixer(request):
return prefix
-@cache_page_with_prefix(ONE_WEEK, _blog_post_key_prefixer)
+@cache_page(
+ ONE_WEEK,
+ _blog_post_key_prefixer,
+ post_process_response=mincss_response
+)
def blog_post(request, oid):
if oid.endswith('/'):
oid = oid[:-1]
@@ -402,12 +407,12 @@ def _plog_index_key_prefixer(request):
prefix += str(latest_date)
return prefix
-@cache_page_with_prefix(ONE_DAY, _plog_index_key_prefixer)
+@cache_page(
+ ONE_DAY,
+ _plog_index_key_prefixer,
+ post_process_response=mincss_response
+)
def plog_index(request):
-
- # this is temporarily here to see how often this is actually rendered
- logging.info("PSEUDO-DEBUGGING cache miss on plog_index")
-
groups = defaultdict(list)
now = utc_now()
group_dates = []
@@ -455,7 +460,7 @@ def _new_comment_key_prefixer(request):
return prefix
-@cache_page_with_prefix(ONE_HOUR, _new_comment_key_prefixer)
+@cache_page(ONE_HOUR, _new_comment_key_prefixer)
def new_comments(request):
data = {}
comments = BlogComment.objects.all()
@@ -471,7 +476,7 @@ def new_comments(request):
data['comments'] = (comments
.order_by('-add_date')
- .select_related('blogitem')[:100])
+ .select_related('blogitem')[:50])
return render(request, 'plog/new-comments.html', data)
@@ -688,6 +693,7 @@ def delete_post_thumbnail(request):
)
+@cache_page(ONE_DAY)
def calendar(request):
data = {'page_title': 'Archive calendar'}
return render(request, 'plog/calendar.html', data)
View
10 apps/view_cache_utils/prefix_functions.py
@@ -1,10 +0,0 @@
-
-auth_key_prefixes = {True:'logged_in', False:'not_logged_in'}
-
-def auth_key_prefix(request):
- ''' key prefix for exactly 2 versions of page: for authenticated and for anonymous users.
- '''
- if request.GET:
- return None #magic value to disable caching
- res = auth_key_prefixes[request.user.is_authenticated()]
- return res
View
11 peterbecom/settings/base.py
@@ -101,9 +101,10 @@
JINGO_EXCLUDE_APPS = (
- 'debug_toolbar',
- 'admin',
- 'bootstrapform',
+ 'debug_toolbar',
+ 'admin',
+ 'bootstrapform',
+ 'fancy_cache',
)
MIDDLEWARE_CLASSES = (
@@ -157,6 +158,7 @@
'apps.homepage',
'apps.legacy',
'apps.stats',
+ 'fancy_cache',
)
# A sample logging configuration. The only tangible logging
@@ -236,3 +238,6 @@
LOGIN_URL = '/admin/'
INBOUND_EMAIL_ADDRESS = 'setup@yourown.com'
+
+FANCY_REMEMBER_ALL_URLS = True
+FANCY_REMEMBER_STATS_ALL_URLS = True
View
2  requirements.txt
@@ -16,3 +16,5 @@ markdown
sorl-thumbnail
PIL
mincss
+django-fancy-cache
+cssmin
Please sign in to comment.
Something went wrong with that request. Please try again.