fix test for python 3.3

1 parent cfdc646 · commit 716574f9089289a8c276127949f61f416343fdb6 · jamesturk committed Apr 23, 2013
Showing with 16 additions and 12 deletions.
  1. +5 −0 docs/changelog.rst
  2. +2 −2 docs/conf.py
  3. +1 −1 scrapelib/__init__.py
  4. +3 −5 scrapelib/__main__.py
  5. +4 −3 scrapelib/tests/test_scraper.py
  6. +1 −1 setup.py
5 docs/changelog.rst
@@ -1,6 +1,11 @@
scrapelib changelog
===================
+0.9.0
+-----
+ * replace FTPSession with FTPAdapter
+ *
+
0.8.0
-----
**18 March 2013**
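
The changelog entry above refers to swapping the old FTPSession for an FTPAdapter built on requests' transport-adapter interface. A minimal, hypothetical sketch (not scrapelib's actual code) of how such an adapter can be implemented and mounted on a session for ftp:// URLs, using Python 3's urllib:

import io
import urllib.request

import requests
from requests.adapters import BaseAdapter


class FTPAdapter(BaseAdapter):
    """Illustrative adapter that fetches ftp:// URLs via urllib."""

    def send(self, request, **kwargs):
        remote = urllib.request.urlopen(request.url)
        try:
            body = remote.read()
        finally:
            remote.close()
        response = requests.Response()
        response.status_code = 200
        response.url = request.url
        response.request = request
        response.raw = io.BytesIO(body)   # response.content will read from here
        return response

    def close(self):
        pass


session = requests.Session()
session.mount('ftp://', FTPAdapter())     # route ftp:// requests through the adapter
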
4 docs/conf.py
@@ -48,9 +48,9 @@
# built documents.
#
# The short X.Y version.
-version = '0.8'
+version = '0.9'
# The full version, including alpha/beta/rc tags.
-release = '0.8.0'
+release = '0.9.0-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
2 scrapelib/__init__.py
@@ -24,7 +24,7 @@
from urllib import robotparser
_str_type = str
-__version__ = '0.8.0'
+__version__ = '0.9.0-dev'
_user_agent = ' '.join(('scrapelib', __version__,
requests.utils.default_user_agent()))
8 scrapelib/__main__.py
@@ -1,4 +1,5 @@
from . import Scraper, _user_agent
+import argparse
def scrapeshell(): # pragma: no cover
@@ -13,11 +14,6 @@ def scrapeshell(): # pragma: no cover
print('scrapeshell requires ipython >= 0.11')
return
try:
- import argparse
- except ImportError:
- print('scrapeshell requires argparse')
- return
- try:
import lxml.html
USE_LXML = True
except ImportError:
@@ -53,6 +49,8 @@ def scrapeshell(): # pragma: no cover
print('html: `scrapelib.ResultStr` instance')
if USE_LXML:
print('doc: `lxml HTML element`')
+ else:
+ print('doc not available: lxml not installed')
embed()
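
The hunk above drops the ImportError fallback around argparse, which ships in the standard library on every Python version scrapelib supports, and keeps only lxml as an optional import. A small standalone sketch of that pattern; the argument parser itself is hypothetical and only for illustration:

import argparse            # stdlib on Python 2.7+ / 3.2+, no fallback needed

try:
    import lxml.html       # optional: enables the `doc` element in scrapeshell
    USE_LXML = True
except ImportError:
    USE_LXML = False


def main():
    parser = argparse.ArgumentParser(description='fetch a URL (illustrative)')
    parser.add_argument('url', help='URL to fetch')
    args = parser.parse_args()
    if not USE_LXML:
        print('doc not available: lxml not installed')
    print('would fetch:', args.url)


if __name__ == '__main__':
    main()
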
7 scrapelib/tests/test_scraper.py
@@ -302,7 +302,8 @@ def test_disable_compression():
# compression disabled
data = s.urlopen(HTTPBIN + 'headers')
- assert_equal(json.loads(data)['headers']['Accept-Encoding'], 'text/*')
+ assert 'compress' not in json.loads(data)['headers']['Accept-Encoding']
+ assert 'gzip' not in json.loads(data)['headers']['Accept-Encoding']
# default is restored
s.disable_compression = False
@@ -312,9 +313,9 @@ def test_disable_compression():
# A supplied Accept-Encoding headers overrides the
# disable_compression option
- s.headers['Accept-Encoding'] = '*'
+ s.headers['Accept-Encoding'] = 'xyz'
data = s.urlopen(HTTPBIN + 'headers')
- assert_equal(json.loads(data)['headers']['Accept-Encoding'], '*')
+ assert 'xyz' in json.loads(data)['headers']['Accept-Encoding']
def test_callable_headers():
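
This is the Python 3.3 fix named in the commit message: the exact Accept-Encoding value that requests sends varies across Python and requests versions, so the test now asserts only what matters (no compression tokens when compression is disabled, and the presence of an explicitly supplied value). A rough standalone sketch of the same checks, assuming HTTPBIN points at a reachable httpbin instance:

import json
from scrapelib import Scraper

HTTPBIN = 'http://httpbin.org/'   # assumed here; the test suite configures its own

s = Scraper()
s.disable_compression = True
sent = json.loads(s.urlopen(HTTPBIN + 'headers'))['headers']['Accept-Encoding']
assert 'gzip' not in sent and 'compress' not in sent

s.headers['Accept-Encoding'] = 'xyz'   # an explicit header overrides the option
sent = json.loads(s.urlopen(HTTPBIN + 'headers'))['headers']['Accept-Encoding']
assert 'xyz' in sent
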
2 setup.py
@@ -5,7 +5,7 @@
long_description = open('README.rst').read()
setup(name="scrapelib",
- version='0.8.0',
+ version='0.9.0-dev',
py_modules=['scrapelib'],
author="James Turk",
author_email='jturk@sunlightfoundation.com',
