Skip to content
Browse files

Add speed limiting option for uploads

  • Loading branch information...
1 parent 0151fa4 commit 3a64621d4f523848a17e95f973d35b494d59f8f3 @joelw committed Mar 2, 2012
Showing with 23 additions and 1 deletion.
  1. +1 −0 S3/Config.py
  2. +21 −1 S3/S3.py
  3. +1 −0 s3cmd
View
1 S3/Config.py
@@ -83,6 +83,7 @@ class Config(object):
website_index = "index.html"
website_error = ""
website_endpoint = "http://%(bucket)s.s3-website-%(location)s.amazonaws.com/"
+ speed_limit = 0
## Creating a singleton
def __new__(self, configfile = None):
View
22 S3/S3.py
@@ -10,6 +10,7 @@
import logging
import mimetypes
import re
+import datetime
from logging import debug, info, warning, error
from stat import ST_SIZE
@@ -654,16 +655,35 @@ def send_file(self, request, file, labels, throttle = 0, retries = _max_retries,
file.seek(offset)
md5_hash = md5()
try:
+ self.sleep_adjust = 0.0
while (size_left > 0):
#debug("SendFile: Reading up to %d bytes from '%s'" % (self.config.send_chunk, file.name))
- data = file.read(min(self.config.send_chunk, size_left))
+ sendsize = min(self.config.send_chunk, size_left)
+ data = file.read(sendsize)
md5_hash.update(data)
+ time_before = datetime.datetime.now()
conn.send(data)
+ time_after = datetime.datetime.now()
+ elapsed_time = (time_after - time_before).total_seconds()
if self.config.progress_meter:
progress.update(delta_position = len(data))
size_left -= len(data)
if throttle:
time.sleep(throttle)
+ # Speed limit
+ if self.config.speed_limit > 0:
+ expected_time = float(sendsize) / (self.config.speed_limit*1024)
+ debug("sendsize %s, Expected %s, Actual %s", sendsize, expected_time, elapsed_time)
+ if expected_time > elapsed_time:
+ time_before = datetime.datetime.now()
+ time.sleep(expected_time - elapsed_time + self.sleep_adjust)
+ time_after = datetime.datetime.now()
+ self.sleep_adjust = (expected_time - elapsed_time) - (time_after - time_before).total_seconds()
+ debug("Sleep adjust %s", self.sleep_adjust)
+ if self.sleep_adjust > 0.5:
+ self.sleep_adjust = 0.5
+ if self.sleep_adjust < -0.5:
+ self.sleep_adjust = -0.5
md5_computed = md5_hash.hexdigest()
response = {}
http_response = conn.getresponse()
View
1 s3cmd
@@ -1548,6 +1548,7 @@ def main():
optparser.add_option("-d", "--debug", dest="verbosity", action="store_const", const=logging.DEBUG, help="Enable debug output.")
optparser.add_option( "--version", dest="show_version", action="store_true", help="Show s3cmd version (%s) and exit." % (PkgInfo.version))
optparser.add_option("-F", "--follow-symlinks", dest="follow_symlinks", action="store_true", default=False, help="Follow symbolic links as if they are regular files")
+ optparser.add_option( "--speedlimit", dest="speed_limit", action="store", metavar="SPEED", default=0, help="Limit transfer to SPEED KB/s")
optparser.set_usage(optparser.usage + " COMMAND [parameters]")
optparser.set_description('S3cmd is a tool for managing objects in '+

2 comments on commit 3a64621

@integritydc

Excellent! This was what I needed to keep my server from slowing down while moving backups to Amazon. This still works in the latest version of s3cmd (1.5.0-beta1) although, obviously, the line numbers aren't current. Also, since I'm using Python 2.6, total_seconds() was not available, so I had to make a couple of modifications to compute the elapsed seconds manually from the timedelta. Thanks for your submission!

@joelw
Owner
joelw commented on 3a64621 Mar 8, 2014

I'm glad to hear it was useful! :) It seemed like a glaring omission to me as well.

From memory, when I wrote it s3cmd wasn't really being actively maintained and pull requests weren't being processed, but last time I checked the situation had changed. If you've got it working with the latest versions of s3cmd and Python, please consider submitting your patch!

Please sign in to comment.
Something went wrong with that request. Please try again.