Skip to content

Commit

Permalink
Merged slinkp-1447-httpcache-fix-branch, -r 67975:67977.
Browse files Browse the repository at this point in the history
Fixes issue with AcceleratedHTTPCacheManager sending PURGE from a
virtual-hosted zope, and adds a bunch of related tests and comments.
  • Loading branch information
slinkp committed May 5, 2006
1 parent 3dc849c commit 0764081
Show file tree
Hide file tree
Showing 2 changed files with 157 additions and 68 deletions.
67 changes: 52 additions & 15 deletions AcceleratedHTTPCacheManager.py
Expand Up @@ -20,6 +20,8 @@

from OFS.Cache import Cache, CacheManager
from OFS.SimpleItem import SimpleItem
import logging
import socket
import time
import Globals
from Globals import DTMLFile
Expand All @@ -29,10 +31,15 @@
from App.Common import rfc1123_date


logger = logging.getLogger('Zope.AcceleratedHTTPCacheManager')

class AcceleratedHTTPCache (Cache):
# Note the need to take thread safety into account.
# Also note that objects of this class are not persistent,
# nor do they use acquisition.

connection_factory = httplib.HTTPConnection

def __init__(self):
    # Keys are physical-path tuples; ZCache_invalidate() discards an
    # object's entry when that object is purged.  (Values are
    # presumably per-object hit counts, per the attribute name --
    # their exact shape is not visible in this chunk; confirm.)
    self.hit_counts = {}

Expand All @@ -42,14 +49,30 @@ def initSettings(self, kw):
self.__dict__.update(kw)

def ZCache_invalidate(self, ob):
    """Send a PURGE request for ``ob`` to every configured accelerator.

    Returns a human-readable string summarizing the accelerator
    responses, e.g. ``'Server response(s): 200 OK'``.
    """
    # Note that this only works for default views of objects at
    # their canonical path.  If an object is viewed and cached at
    # any other path via acquisition or virtual hosting, that
    # cache entry cannot be purged because there is an infinite
    # number of such possible paths, and Squid does not support
    # any kind of fuzzy purging; we have to specify exactly the
    # URL to purge.  So we try to purge the known paths most
    # likely to turn up in practice: the physical path and the
    # current absolute_url_path.  Any of those can be wrong in
    # some circumstances, but it may be the best we can do :-(
    phys_path = ob.getPhysicalPath()
    # Drop any local hit statistics for the purged object.
    if phys_path in self.hit_counts:
        del self.hit_counts[phys_path]
    purge_paths = (ob.absolute_url_path(), quote('/'.join(phys_path)))
    # Don't purge the same path twice.
    if purge_paths[0] == purge_paths[1]:
        purge_paths = purge_paths[:1]
    results = []
    for url in self.notify_urls:
        if not url.strip():
            continue
        # Send the PURGE request to each HTTP accelerator.
        if url[:7].lower() == 'http://':
            u = url
        else:
            u = 'http://' + url
        (scheme, host, path, params, query, fragment
         ) = urlparse.urlparse(u)
        # A doubled scheme ('http://http://host') can result from a
        # misconfigured notify_url; strip the embedded one.
        if path.lower().startswith('/http://'):
            path = path.lstrip('/')
        for ob_path in purge_paths:
            p = path.rstrip('/') + ob_path
            # connection_factory is a class attribute so tests can
            # substitute a mock connection class.
            h = self.connection_factory(host)
            logger.debug('PURGING host %s, path %s' % (host, p))
            # An exception on one purge should not prevent the others.
            try:
                h.request('PURGE', p)
                # This better not hang. I wish httplib gave us
                # control of timeouts.
            except socket.gaierror:
                msg = 'socket.gaierror: maybe the server ' + \
                      'at %s is down, or the cache manager ' + \
                      'is misconfigured?'
                logger.error(msg % url)
                continue
            r = h.getresponse()
            status = '%s %s' % (r.status, r.reason)
            results.append(status)
            logger.debug('purge response: %s' % status)
    return 'Server response(s): ' + ';'.join(results)

def ZCache_get(self, ob, view_name, keywords, mtime_func, default):
    """Always a miss: an external HTTP accelerator holds the cached
    data, so Zope itself has nothing to return but the default."""
    return default

def ZCache_set(self, ob, data, view_name, keywords, mtime_func):
# Note the blatant ignorance of view_name, keywords, and
# mtime_func. Standard HTTP accelerators are not able to make
# use of this data.
# Note the blatant ignorance of view_name and keywords.
# Standard HTTP accelerators are not able to make use of this
# data. mtime_func is also ignored because using "now" for
# Last-Modified is as good as using any time in the past.
REQUEST = ob.REQUEST
RESPONSE = REQUEST.RESPONSE
anon = 1
Expand Down Expand Up @@ -151,7 +188,7 @@ def ZCacheManager_getCache(self):

def getSettings(self):
    """Return a copy of the cache manager's settings mapping.

    A copy is returned so UI / DTML code cannot mutate the real
    settings.  (The pasted diff showed both the old and the new
    return line; only the new one is kept.)
    """
    return self._settings.copy()  # Don't let UI modify it.

manage_main = DTMLFile('dtml/propsAccel', globals())

Expand Down
158 changes: 105 additions & 53 deletions tests/test_AcceleratedHTTPCacheManager.py
Expand Up @@ -15,87 +15,139 @@
$Id$
"""

import unittest
import threading
import time
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from Products.StandardCacheManagers.AcceleratedHTTPCacheManager \
import AcceleratedHTTPCache, AcceleratedHTTPCacheManager

class PurgingHTTPRequestHandler(SimpleHTTPRequestHandler):

protocol_version = 'HTTP/1.0'
class DummyObject:
    """Minimal stand-in for a cached Zope object.

    ``path`` is the object's physical path.  ``urlpath`` is what
    ``absolute_url_path()`` reports; it defaults to ``path``, as for
    an object served at its canonical location (pass a different
    value to simulate virtual hosting).

    (Reconstructed: the pasted diff interleaved deleted lines from
    the removed PurgingHTTPRequestHandler class here.)
    """

    def __init__(self, path='/path/to/object', urlpath=None):
        self.path = path
        if urlpath is None:
            self.urlpath = path
        else:
            self.urlpath = urlpath

    def getPhysicalPath(self):
        return tuple(self.path.split('/'))

    def absolute_url_path(self):
        return self.urlpath

class MockResponse:
    """Just enough of an httplib response object for these tests:
    a status and a reason attribute, nothing else."""

    status = '200'
    reason = "who knows, I'm just a mock"

class DummyObject:
def MockConnectionClassFactory():
    """Return a mock HTTPConnection class and its shared request log.

    Every instance of the returned class appends a dict describing
    each request() call to the same ``request_log`` list, so a test
    can inspect all traffic after the fact.

    (Reconstructed: the pasted diff interleaved deleted DummyObject /
    test-class lines inside this function.)
    """
    request_log = []

    class MockConnection:
        # Minimal replacement for httplib.HTTPConnection.
        def __init__(self, host):
            self.host = host
            self.request_log = request_log

        def request(self, method, path):
            self.request_log.append({'method': method,
                                     'host': self.host,
                                     'path': path,})

        def getresponse(self):
            return MockResponse()

    return MockConnection, request_log

_SERVER_PORT = 1888
thread = purged_host = purged_path = None

def tearDown(self):
if self.thread:
self.httpd.server_close()
self.thread.join(2)
class AcceleratedHTTPCacheTests(unittest.TestCase):

def _getTargetClass(self):
    # Resolve the class under test at call time, mirroring the
    # module-level import at the top of this file.
    from Products.StandardCacheManagers.AcceleratedHTTPCacheManager \
        import AcceleratedHTTPCache
    return AcceleratedHTTPCache

def _makeOne(self, *args, **kw):
    # Instantiate the class under test with whatever the caller passed.
    klass = self._getTargetClass()
    return klass(*args, **kw)

def _handleServerRequest(self):
    # NOTE(review): this looks like a leftover helper from the
    # real-HTTP-server version of these tests; the mock-based tests
    # use MockConnectionClassFactory instead.  It serves exactly one
    # request on a daemon thread, then the handler thread exits.

    server_address = ('', self._SERVER_PORT)

    self.httpd = HTTPServer(server_address, PurgingHTTPRequestHandler)
    # The handler stores purge details on the test case via
    # server.test_case (see PurgingHTTPRequestHandler.do_PURGE).
    self.httpd.test_case = self

    # NOTE(review): 'sa' is assigned but never used.
    sa = self.httpd.socket.getsockname()
    self.thread = threading.Thread(target=self.httpd.handle_request)
    self.thread.setDaemon(True)
    self.thread.start()
    time.sleep(0.2) # Allow time for server startup

def test_PURGE_passes_Host_header(self):
    """A single notify_url yields one PURGE with the right host/path.

    (Reconstructed: the pasted diff interleaved two deleted lines
    from the old real-server version of this test.)
    """
    _TO_NOTIFY = 'localhost:1888'
    cache = self._makeOne()
    cache.notify_urls = ['http://%s' % _TO_NOTIFY]
    cache.connection_factory, requests = MockConnectionClassFactory()
    dummy = DummyObject()
    cache.ZCache_invalidate(dummy)
    self.assertEqual(len(requests), 1)
    result = requests[-1]
    self.assertEqual(result['method'], 'PURGE')
    self.assertEqual(result['host'], _TO_NOTIFY)
    self.assertEqual(result['path'], dummy.path)

def test_multiple_notify(self):
    # Each configured notify_url gets its own PURGE per invalidation.
    cache = self._makeOne()
    cache.notify_urls = ['http://foo', 'bar', 'http://baz/bat']
    cache.connection_factory, log = MockConnectionClassFactory()
    cache.ZCache_invalidate(DummyObject())
    self.assertEqual(len(log), 3)
    for entry, expected_host in zip(log, ('foo', 'bar', 'baz')):
        self.assertEqual(entry['host'], expected_host)
    # A second invalidation appends three more requests to the log.
    cache.ZCache_invalidate(DummyObject())
    self.assertEqual(len(log), 6)

def test_vhost_purging_1447(self):
    # Regression test for http://www.zope.org/Collectors/Zope/1447:
    # an object published at a virtual-host path must be purged at
    # BOTH its absolute url path and its physical path.
    cache = self._makeOne()
    cache.notify_urls = ['http://foo.com']
    cache.connection_factory, log = MockConnectionClassFactory()
    dummy = DummyObject(urlpath='/published/elsewhere')
    cache.ZCache_invalidate(dummy)
    self.assertEqual(len(log), 2)
    # Absolute url path is tried first, physical path second.
    self.assertEqual(log[0]['path'], dummy.absolute_url_path())
    self.assertEqual(log[1]['path'], dummy.path)

# Run the HTTP server for this test.
self._handleServerRequest()

cache.ZCache_invalidate(object)
class CacheManagerTests(unittest.TestCase):
    """Exercise AcceleratedHTTPCacheManager wired into a folder."""

    def _getTargetClass(self):
        return AcceleratedHTTPCacheManager

    def _makeOne(self, *args, **kw):
        factory = self._getTargetClass()
        return factory(*args, **kw)

    def _makeContext(self):
        # Build a minimal folder hierarchy holding one cache manager
        # and return both the root and the (wrapped) manager.
        from OFS.Folder import Folder
        root = Folder()
        root.getPhysicalPath = lambda: ('', 'some_path',)
        cm_id = 'http_cache'
        root._setObject(cm_id, self._makeOne(cm_id))
        manager = root[cm_id]
        return root, manager

    def test_add(self):
        # ensure __init__ doesn't raise errors.
        self._makeContext()

    def test_ZCacheManager_getCache(self):
        root, cachemanager = self._makeContext()
        cache = cachemanager.ZCacheManager_getCache()
        self.assert_(isinstance(cache, AcceleratedHTTPCache))

    def test_getSettings(self):
        root, cachemanager = self._makeContext()
        settings = cachemanager.getSettings()
        for key in ('anonymous_only', 'interval', 'notify_urls'):
            self.assert_(key in settings.keys())

self.assertEqual(self.purged_host, _TO_NOTIFY)
self.assertEqual(self.purged_path, DummyObject._PATH)

def test_suite():
    """Collect every TestCase class in this module into one suite.

    (The pasted diff left the old one-line ``return
    unittest.makeSuite(AcceleratedHTTPCacheTests)`` in place, which
    made the suite-building lines below it unreachable; only the new
    implementation is kept.)
    """
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(AcceleratedHTTPCacheTests))
    suite.addTest(unittest.makeSuite(CacheManagerTests))
    return suite

# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
Expand Down

0 comments on commit 0764081

Please sign in to comment.