Implemented a download timeout (fixes issue #50)

commit cda3a6811baa39d8a80f34c3dcda62fa5fc43acc (1 parent: 4aa8b33)
Authored by Nebelhom, committed by davehunt
Showing with 26 additions and 4 deletions.
  1. +2 −1  mozdownload/parser.py
  2. +24 −3 mozdownload/scraper.py
mozdownload/parser.py (3 changed lines)
@@ -7,6 +7,7 @@
 from HTMLParser import HTMLParser
 import re
 import urllib
+import urllib2
 
 
 class DirectoryParser(HTMLParser):
@@ -18,7 +19,7 @@ def __init__(self, url):
         self.entries = []
         self.active_url = None
 
-        req = urllib.urlopen(url)
+        req = urllib2.urlopen(url, timeout=60)
         self.feed(req.read())
 
     def filter(self, regex):
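Editorial aside, not part of the commit: urllib.urlopen accepts no timeout argument, which is why the import switches to urllib2. Its timeout bounds each blocking socket operation (the initial connect and every read), so an unresponsive server now fails fast instead of hanging the parser. A minimal sketch of that behaviour, with a placeholder URL:

    import socket
    import urllib2

    try:
        # the 60-second timeout covers the connect and each socket read
        req = urllib2.urlopen('http://ftp.mozilla.org/pub/', timeout=60)
        listing = req.read()
    except (urllib2.URLError, socket.timeout) as e:
        # a connect timeout arrives wrapped in URLError; a timeout during
        # read() is raised directly as socket.timeout
        print 'Fetching the directory listing failed: %s' % e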
mozdownload/scraper.py (27 changed lines)
@@ -48,12 +48,20 @@ def __init__(self, message, location):
         Exception.__init__(self, ': '.join([message, location]))
 
 
+class TimeoutException(Exception):
+    """Exception for a download exceeding the allocated timeout"""
+    def __init__(self):
+        self.message = 'The download exceeded the allocated timeout'
+        Exception.__init__(self, self.message)
+
+
 class Scraper(object):
     """Generic class to download an application from the Mozilla server"""
 
     def __init__(self, directory, version, platform=None,
                  application='firefox', locale='en-US', extension=None,
-                 authentication=None, retry_attempts=3, retry_delay=10):
+                 authentication=None, retry_attempts=3, retry_delay=10,
+                 timeout=180):
 
         # Private properties for caching
         self._target = None
@@ -67,6 +75,7 @@ def __init__(self, directory, version, platform=None,
         self.authentication = authentication
         self.retry_attempts = retry_attempts
         self.retry_delay = retry_delay
+        self.timeout = timeout
 
         # build the base URL
         self.application = application
@@ -191,13 +200,17 @@ def download(self):
         while True:
             attempts += 1
             try:
+                start_time = datetime.now()
                 r = urllib2.urlopen(self.final_url)
                 CHUNK = 16 * 1024
                 with open(tmp_file, 'wb') as f:
                     for chunk in iter(lambda: r.read(CHUNK), ''):
                         f.write(chunk)
+                        t1 = (datetime.now() - start_time).total_seconds()
+                        if t1 >= self.timeout:
+                            raise TimeoutException
                 break
-            except (urllib2.HTTPError, urllib2.URLError):
+            except (urllib2.HTTPError, urllib2.URLError, TimeoutException):
                 if tmp_file and os.path.isfile(tmp_file):
                     os.remove(tmp_file)
                 print 'Download failed! Retrying... (attempt %s)' % attempts
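For context, the added lines give download() a wall-clock cutoff that is re-evaluated after every 16 KB chunk, and the widened except clause folds a timeout into the existing retry logic. The same pattern as a standalone sketch; the helper name and defaults are illustrative, not part of the commit:

    from datetime import datetime
    import urllib2

    def fetch_with_timeout(url, path, timeout=180, chunk_size=16 * 1024):
        # hypothetical helper mirroring the loop in Scraper.download()
        start_time = datetime.now()
        r = urllib2.urlopen(url)
        with open(path, 'wb') as f:
            for chunk in iter(lambda: r.read(chunk_size), ''):
                f.write(chunk)
                # elapsed time is checked between chunk reads, so a slow
                # transfer aborts at the next chunk boundary
                if (datetime.now() - start_time).total_seconds() >= timeout:
                    raise Exception('The download exceeded the allocated timeout')

One caveat: because the check only runs between reads, a read that blocks indefinitely would never reach it. The timeout=60 passed to urllib2.urlopen in parser.py guards that case for directory listings; the download call itself relies on chunks continuing to arrive.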
@@ -744,6 +757,13 @@ def cli():
                       metavar='RETRY_DELAY',
                       help='Amount of time (in seconds) to wait between retry '
                            'attempts, default: %default')
+    parser.add_option('--timeout',
+                      dest='timeout',
+                      default=180,
+                      type=int,
+                      metavar='TIMEOUT',
+                      help='Amount of time (in seconds) until download times '
+                           'out, default: %default')
 
     # Option group for candidate builds
     group = OptionGroup(parser, "Candidate builds",
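With the option in place, the cutoff can be adjusted from the command line. --timeout is the flag added above; the remaining arguments are illustrative only:

    mozdownload --version=latest --platform=win32 --timeout=300

Note that optparse accepts type=int here because it converts a builtin type object to its type name internally, so options.timeout arrives already coerced to an integer.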
@@ -805,7 +825,8 @@ def cli():
                                  'username': options.username,
                                  'password': options.password},
               'retry_attempts': options.retry_attempts,
-              'retry_delay': options.retry_delay}
+              'retry_delay': options.retry_delay,
+              'timeout': options.timeout}
     scraper_options = {'candidate': {
                            'build_number': options.build_number,
                            'no_unsigned': options.no_unsigned},
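Finally, for context on how the new key is consumed: the kwargs dict built in cli() is presumably expanded into the Scraper constructor, where 'timeout' lands on the timeout=180 parameter added above. A condensed, hypothetical illustration (values and the construction call are simplified, not verbatim source):

    from mozdownload.scraper import Scraper

    # placeholder values standing in for parsed command-line options
    kwargs = {'retry_attempts': 3,
              'retry_delay': 10,
              'timeout': 300}

    scraper = Scraper('/tmp/builds', 'latest', **kwargs)
    # Scraper.__init__ stores the value on self.timeout, which download()
    # reads for its elapsed-time check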