Skip to content

Commit

Permalink
Merge efc3af0 into f9cfcc9
Browse files Browse the repository at this point in the history
  • Loading branch information
jcrugzz committed Jan 3, 2019
2 parents f9cfcc9 + efc3af0 commit 5c13058
Show file tree
Hide file tree
Showing 8 changed files with 188 additions and 67 deletions.
6 changes: 6 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,9 @@
## FUTURE

* Amazon: use the native `aws-sdk` for `s3` uploads instead of the `s3-upload-stream` module
* Update the `aws-sdk` to `2.382.0`
* Amazon storage `client.upload`: expose upload concurrency via the `queueSize` and `partSize` options, and add the ability to abort an in-progress upload

## v1.3.0
* OpenStack identity v3 (keystone) support, Issue [#367](//github.com/pkgcloud/pkgcloud/issues/367), [#477](//github.com/pkgcloud/pkgcloud/issues/477), PR [#461](//github.com/pkgcloud/pkgcloud/pull/461)
* OpenStack cancel client download, Issue [#379](//github.com/pkgcloud/pkgcloud/issues/379), PR [#416](//github.com/pkgcloud/pkgcloud/pull/416)
Expand Down
26 changes: 26 additions & 0 deletions docs/providers/amazon.md
Expand Up @@ -26,3 +26,29 @@ var client = require('pkgcloud').storage.createClient({
region: 'us-west-2' // region
});
```
### File upload

Whether the s3 `multipart-upload` or `putObject` API is used depends on the `partSize` option and the size of the file being uploaded.
A single `putObject` request is made if the object being uploaded is small enough; if the object size exceeds the configured `partSize`, the `multipart-upload` API is used.


```javascript
var readableStream = fs.createReadStream('./path/to/file');

var writableStream = client.upload({
queueSize: 1, // == default value
partSize: 5 * 1024 * 1024, // == default value of 5MB
container: 'web-static',
remote: 'image.jpg'
});

// writableStream.managedUpload === https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3/ManagedUpload.html
// The managedUpload object lets you abort an ongoing upload or track upload progress.

readableStream.pipe(writableStream)
.on('success', function(file) {
console.log(file);
}).on('error', function(err) {
console.log(err);
});
```
31 changes: 20 additions & 11 deletions lib/pkgcloud/amazon/storage/client/files.js
Expand Up @@ -52,6 +52,11 @@ exports.upload = function (options) {
Key: options.remote instanceof base.File ? options.remote.name : options.remote
};

var s3Settings = {
queueSize: options.queueSize || 1,
partSize: options.partSize || 5 * 1024 * 1024
};

if (options.cacheControl) {
s3Options.CacheControl = options.cacheControl;
}
Expand All @@ -74,21 +79,25 @@ exports.upload = function (options) {
s3Options.ServerSideEncryption = options.ServerSideEncryption;
}

var proxyStream = through(),
writableStream = self.s3Stream.upload(s3Options);

// we need a writable stream because aws-sdk listens for an error event on writable
// stream and redirects it to the provided callback - without the writable stream
// the error would be emitted twice on the returned proxyStream
var writableStream = through();
// we need a proxy stream so we can always return a file model
// via the 'success' event
writableStream.on('uploaded', function(details) {
proxyStream.emit('success', new storage.File(self, details));
});
var proxyStream = through();

writableStream.on('error', function(err) {
proxyStream.emit('error', err);
});
s3Options.Body = writableStream;

var managedUpload = self.s3.upload(s3Options, s3Settings);

proxyStream.managedUpload = managedUpload;

writableStream.on('data', function (chunk) {
proxyStream.emit('data', chunk);
managedUpload.send(function(err, data) {
if (err) {
return proxyStream.emit('error', err);
}
return proxyStream.emit('success', new storage.File(self, data));
});

proxyStream.pipe(writableStream);
Expand Down
4 changes: 0 additions & 4 deletions lib/pkgcloud/amazon/storage/client/index.js
Expand Up @@ -7,7 +7,6 @@

var util = require('util'),
AWS = require('aws-sdk'),
s3Stream = require('s3-upload-stream'),
amazon = require('../../client'),
_ = require('lodash');

Expand All @@ -18,9 +17,6 @@ var Client = exports.Client = function (options) {
_.extend(this, require('./files'));

this.s3 = new AWS.S3(this._awsConfig);

// configure the s3Stream
this.s3Stream = s3Stream(this.s3);
};

util.inherits(Client, amazon.Client);
15 changes: 5 additions & 10 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 1 addition & 2 deletions package.json
Expand Up @@ -56,7 +56,7 @@
],
"dependencies": {
"async": "^2.6.1",
"aws-sdk": "^2.2.43",
"aws-sdk": "^2.382.0",
"errs": "^0.3.2",
"eventemitter2": "^5.0.1",
"fast-json-patch": "0.5.x",
Expand All @@ -69,7 +69,6 @@
"mime": "1.4.1",
"qs": "^6.5.2",
"request": "^2.88.0",
"s3-upload-stream": "~1.0.7",
"through2": "0.6.x",
"url-join": "0.0.x",
"xml2js": "0.1.x"
Expand Down

0 comments on commit 5c13058

Please sign in to comment.