add a test for the httplib2 NoneType break

commit 0d5065d76faa15cfa118e4d1f88bb3c27ca8184c 1 parent 6362dc8
@jamesturk authored
Showing with 34 additions and 1 deletion.
  1. +1 −0  coverage.sh
  2. +0 −1  scrapelib.py
  3. +33 −0 test.py
1  coverage.sh
@@ -1 +1,2 @@
+rm -rf cover/
nosetests --cover-html --with-coverage --cover-package=scrapelib
1  scrapelib.py
@@ -363,7 +363,6 @@ def _make_headers(self, url):
    def follow_redirects(self):
        if self._http:
            return self._http.follow_redirects
-       return False

    @follow_redirects.setter
    def follow_redirects(self, value):
33 test.py
@@ -339,6 +339,39 @@ def side_effect(*args, **kwargs):
            self.assertEqual(resp, "success!")
            self.assertEqual(mock_request.call_count, 2)

+    def test_httplib2_nasty_workaround(self):
+        """ test workaround for httplib2 breakage """
+        count = []
+
+        # on the first call raise the httplib2 AttributeError,
+        # on later calls return a successful response
+        def side_effect(*args, **kwargs):
+            if count:
+                return httplib2.Response({'status': 200}), 'success!'
+            count.append(1)
+            raise AttributeError("'NoneType' object has no attribute "
+                                 "'makefile'")
+
+        mock_request = mock.Mock(side_effect=side_effect)
+
+        with mock.patch.object(httplib2.Http, 'request', mock_request):
+            s = scrapelib.Scraper(retry_attempts=0, retry_wait_seconds=0.001,
+                                  follow_robots=False)
+            # with retries disabled, the error propagates after one try
+            self.assertRaises(AttributeError, s.urlopen, "http://dummy/")
+            self.assertEqual(mock_request.call_count, 1)
+
+        mock_request.reset_mock()
+        count = []
+        with mock.patch.object(httplib2.Http, 'request', mock_request):
+            s = scrapelib.Scraper(retry_attempts=2, retry_wait_seconds=0.001,
+                                  follow_robots=False)
+            resp = s.urlopen("http://dummy/")
+            # with retries enabled, the result comes back on the second try
+            self.assertEqual(resp, "success!")
+            self.assertEqual(mock_request.call_count, 2)
+
+
    def test_disable_compression(self):
        s = scrapelib.Scraper(disable_compression=True)
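
For context, here is a minimal sketch of the kind of retry workaround the new test exercises. It is an illustration only, not scrapelib's actual implementation: the function name request_with_retry and its parameters are invented for this note. httplib2 can raise AttributeError: 'NoneType' object has no attribute 'makefile' when it reuses a connection whose underlying socket has gone away, and the workaround treats that specific error as retryable.

import time

import httplib2


def request_with_retry(url, retry_attempts=2, retry_wait_seconds=0.001):
    # Retry a request when httplib2 hits its NoneType break
    # ("'NoneType' object has no attribute 'makefile'").
    http = httplib2.Http()
    tries = 0
    while True:
        try:
            return http.request(url)
        except AttributeError as e:
            # only the known httplib2 breakage is retried; anything else re-raises
            if "'NoneType' object has no attribute" not in str(e):
                raise
            if tries >= retry_attempts:
                raise
            tries += 1
            time.sleep(retry_wait_seconds)

In the test above, mock.patch stands in for httplib2.Http.request so that the first call raises exactly this error, letting the retry path be asserted without any network access.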