Multipart upload #14

Merged
merged 7 commits into from

1 participant

Commits on Sep 20, 2012
  1. Implementation of multipart upload and multiobject delete

    robert clarke committed
Commits on Sep 21, 2012
  1. change tests to use File::Temp, rather than modify distribution files in place

    robert clarke committed
  2. sort out messy comments

    robert clarke committed
  3. tidy up minor scruffiness

    robert clarke committed
  4. Move put_part content length calculation to Net::Amazon::S3::Client::Object, tidy up handling of initiate_multipart_upload request

    robert clarke committed
Commits on Nov 28, 2012
  1. Merge multipart upload patch (#13)

    committed
  2. Making Perl::Critic happy

    committed
9 lib/Net/Amazon/S3.pm 100644 → 100755
@@ -112,16 +112,21 @@ use Net::Amazon::S3::Client::Bucket;
use Net::Amazon::S3::Client::Object;
use Net::Amazon::S3::HTTPRequest;
use Net::Amazon::S3::Request;
+use Net::Amazon::S3::Request::CompleteMultipartUpload;
use Net::Amazon::S3::Request::CreateBucket;
use Net::Amazon::S3::Request::DeleteBucket;
+use Net::Amazon::S3::Request::DeleteMultiObject;
use Net::Amazon::S3::Request::DeleteObject;
use Net::Amazon::S3::Request::GetBucketAccessControl;
use Net::Amazon::S3::Request::GetBucketLocationConstraint;
use Net::Amazon::S3::Request::GetObject;
use Net::Amazon::S3::Request::GetObjectAccessControl;
+use Net::Amazon::S3::Request::InitiateMultipartUpload;
use Net::Amazon::S3::Request::ListAllMyBuckets;
use Net::Amazon::S3::Request::ListBucket;
+use Net::Amazon::S3::Request::ListParts;
use Net::Amazon::S3::Request::PutObject;
+use Net::Amazon::S3::Request::PutPart;
use Net::Amazon::S3::Request::SetBucketAccessControl;
use Net::Amazon::S3::Request::SetObjectAccessControl;
use LWP::UserAgent::Determined;
@@ -196,13 +201,13 @@ sub BUILD {
if ( $self->retry ) {
$ua = LWP::UserAgent::Determined->new(
keep_alive => $KEEP_ALIVE_CACHESIZE,
- requests_redirectable => [qw(GET HEAD DELETE PUT)],
+ requests_redirectable => [qw(GET HEAD DELETE PUT POST)],
);
$ua->timing('1,2,4,8,16,32');
} else {
$ua = LWP::UserAgent->new(
keep_alive => $KEEP_ALIVE_CACHESIZE,
- requests_redirectable => [qw(GET HEAD DELETE PUT)],
+ requests_redirectable => [qw(GET HEAD DELETE PUT POST)],
);
}
19 lib/Net/Amazon/S3/Client/Bucket.pm 100644 → 100755
@@ -132,6 +132,18 @@ sub list {
);
}
+sub delete_multi_object {
+ my $self = shift;
+ my @objects = @_;
+ return unless( scalar(@objects) );
+ my $http_request = Net::Amazon::S3::Request::DeleteMultiObject->new(
+ s3 => $self->client->s3,
+ bucket => $self->name,
+ keys => [ map($_->key, @objects) ],
+ )->http_request;
+ return $self->client->_send_request($http_request);
+}
+
sub object {
my ( $self, %conf ) = @_;
return Net::Amazon::S3::Client::Object->new(
@@ -141,6 +153,7 @@ sub object {
);
}
+
1;
__END__
@@ -228,3 +241,9 @@ This module represents buckets.
# be used to get or put
my $object = $bucket->object( key => 'this is the key' );
+=head2 delete_multi_object
+
+ # delete multiple objects using a multi object delete operation
+ # Accepts a list of L<Net::Amazon::S3::Client::Object> objects.
+ # Limited to a maximum of 1000 objects in one operation
+  $bucket->delete_multi_object($object1, $object2);
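Because Net::Amazon::S3::Request::DeleteMultiObject rejects more than 1000 keys per request, callers holding a longer list have to batch the deletes themselves. A minimal sketch, assuming @all_objects holds Net::Amazon::S3::Client::Object instances belonging to $bucket:

  # delete an arbitrarily long list of objects in batches of 1000,
  # the per-request limit enforced by DeleteMultiObject
  my @pending = @all_objects;
  while (@pending) {
      my @batch = splice @pending, 0, 1000;   # take up to 1000 objects per request
      $bucket->delete_multi_object(@batch);
  }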
113 lib/Net/Amazon/S3/Client/Object.pm 100644 → 100755
@@ -74,18 +74,15 @@ sub get {
my $content = $http_response->content;
my $md5_hex = md5_hex($content);
+ my $etag = $self->etag || $self->_etag($http_response);
+ confess 'Corrupted download'
+ if( !$self->_is_multipart_etag($etag) && $etag ne $md5_hex);
- if ( $self->etag ) {
- confess 'Corrupted download' if $self->etag ne $md5_hex;
- } else {
- confess 'Corrupted download'
- if $self->_etag($http_response) ne $md5_hex;
- }
return $content;
}
sub get_filename {
- my ( $self, $filename ) = @_;
+ my ($self, $filename) = @_;
my $http_request = Net::Amazon::S3::Request::GetObject->new(
s3 => $self->client->s3,
@@ -95,16 +92,13 @@ sub get_filename {
)->http_request;
my $http_response
- = $self->client->_send_request( $http_request, $filename );
+ = $self->client->_send_request($http_request, $filename);
my $md5_hex = file_md5_hex($filename);
- if ( $self->etag ) {
- confess 'Corrupted download' if $self->etag ne $md5_hex;
- } else {
- confess 'Corrupted download'
- if $self->_etag($http_response) ne $md5_hex;
- }
+ my $etag = $self->etag || $self->_etag($http_response);
+ confess 'Corrupted download'
+ if( !$self->_is_multipart_etag($etag) && $etag ne $md5_hex);
}
sub put {
@@ -210,6 +204,60 @@ sub delete {
$self->client->_send_request($http_request);
}
+sub initiate_multipart_upload {
+ my $self = shift;
+ my $http_request = Net::Amazon::S3::Request::InitiateMultipartUpload->new(
+ s3 => $self->client->s3,
+ bucket => $self->bucket->name,
+ key => $self->key,
+ )->http_request;
+ my $xpc = $self->client->_send_request_xpc($http_request);
+ my $upload_id = $xpc->findvalue('//s3:UploadId');
+ confess "Couldn't get upload id from initiate_multipart_upload response XML"
+ unless $upload_id;
+
+ return $upload_id;
+}
+
+sub complete_multipart_upload {
+ my $self = shift;
+
+ my %args = ref($_[0]) ? %{$_[0]} : @_;
+
+ #set default args
+ $args{s3} = $self->client->s3;
+ $args{key} = $self->key;
+ $args{bucket} = $self->bucket->name;
+
+ my $http_request =
+ Net::Amazon::S3::Request::CompleteMultipartUpload->new(%args)->http_request;
+ return $self->client->_send_request($http_request);
+}
+
+sub put_part {
+ my $self = shift;
+
+ my %args = ref($_[0]) ? %{$_[0]} : @_;
+
+ #set default args
+ $args{s3} = $self->client->s3;
+ $args{key} = $self->key;
+ $args{bucket} = $self->bucket->name;
+ #work out content length header
+ $args{headers}->{'Content-Length'} = length $args{value}
+ if(defined $args{value});
+
+ my $http_request =
+ Net::Amazon::S3::Request::PutPart->new(%args)->http_request;
+ return $self->client->_send_request($http_request);
+}
+
+sub list_parts {
+ confess "Not implemented";
+ # TODO - Net::Amazon::S3::Request::ListParts is implemented, but we need to
+ # define a better interface at this level. Currently it returns raw XML.
+}
+
sub uri {
my $self = shift;
return Net::Amazon::S3::Request::GetObject->new(
@@ -283,6 +331,11 @@ sub _etag {
return $etag;
}
+sub _is_multipart_etag {
+ my ( $self, $etag ) = @_;
+ return 1 if($etag =~ /\-\d+$/);
+}
+
1;
__END__
@@ -453,3 +506,35 @@ Content-Disposition using content_disposition.
# return the URI of a publically-accessible object
my $uri = $object->uri;
+=head2 initiate_multipart_upload
+
+ #initiate a new multipart upload for this object
+ my $object = $bucket->object(
+ key => 'massive_video.avi'
+ );
+ my $upload_id = $object->initiate_multipart_upload;
+
+=head2 put_part
+
+ #add a part to a multipart upload
+ my $put_part_response = $object->put_part(
+ upload_id => $upload_id,
+ part_number => 1,
+ value => $chunk_content,
+ );
+ my $part_etag = $put_part_response->header('ETag');
+
+ Returns an L<HTTP::Response> object. It is necessary to keep the ETags for
+ each part, as these are required to complete the upload.
+
+=head2 complete_multipart_upload
+
+ #complete a multipart upload
+ $object->complete_multipart_upload(
+ upload_id => $upload_id,
+ etags => [$etag_1, $etag_2],
+ part_numbers => [$part_number_1, $part_number_2],
+ );
+
+ The etags and part_numbers parameters are parallel ordered lists giving the
+ ETag and part number of each individual part of the multipart upload.
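Taken together, the three methods above form a complete upload flow. A minimal end-to-end sketch, assuming a configured $bucket and that every part except the last is at least 5 MB (the S3 minimum part size); it mirrors the new tests in t/02client.t:

  # upload a large object in two parts, then complete the upload
  my $object    = $bucket->object( key => 'massive_video.avi' );
  my $upload_id = $object->initiate_multipart_upload;

  my ( @etags, @part_numbers );
  my $part_number = 1;
  for my $chunk ( 'x' x ( 5 * 1024 * 1024 ), 'z' x ( 1024 * 1024 ) ) {
      my $response = $object->put_part(
          upload_id   => $upload_id,
          part_number => $part_number,
          value       => $chunk,
      );
      push @etags,        $response->header('ETag');
      push @part_numbers, $part_number++;
  }

  $object->complete_multipart_upload(
      upload_id    => $upload_id,
      etags        => \@etags,
      part_numbers => \@part_numbers,
  );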
22 lib/Net/Amazon/S3/HTTPRequest.pm 100644 → 100755
@@ -6,13 +6,14 @@ use MIME::Base64 qw( encode_base64 );
use Moose::Util::TypeConstraints;
use URI::Escape qw( uri_escape_utf8 );
use URI::QueryParam;
+use URI;
# ABSTRACT: Create a signed HTTP::Request
my $METADATA_PREFIX = 'x-amz-meta-';
my $AMAZON_HEADER_PREFIX = 'x-amz-';
-enum 'HTTPMethod' => qw(DELETE GET HEAD PUT);
+enum 'HTTPMethod' => qw(DELETE GET HEAD PUT POST);
has 's3' => ( is => 'ro', isa => 'Net::Amazon::S3', required => 1 );
has 'method' => ( is => 'ro', isa => 'HTTPMethod', required => 1 );
@@ -140,13 +141,18 @@ sub _canonical_string {
$path =~ /^([^?]*)/;
$buf .= "/$1";
- # ...unless there is an acl or torrent parameter
- if ( $path =~ /[&?]acl($|=|&)/ ) {
- $buf .= '?acl';
- } elsif ( $path =~ /[&?]torrent($|=|&)/ ) {
- $buf .= '?torrent';
- } elsif ( $path =~ /[&?]location($|=|&)/ ) {
- $buf .= '?location';
+ # ...unless there are any parameters we're interested in...
+ if ( $path =~ /[&?](acl|torrent|location|uploads|delete)($|=|&)/ ) {
+ $buf .= "?$1";
+ } elsif ( my %query_params = URI->new($path)->query_form ){
+ # see if the remaining parsed query string provides a part number or upload id
+ if($query_params{partNumber} && $query_params{uploadId}){
+ # rebuild the query string by hand; the order of the params matters for request signing, so we can't depend on URI to preserve it
+ $buf .= sprintf("?partNumber=%s&uploadId=%s", $query_params{partNumber}, $query_params{uploadId});
+ }
+ elsif($query_params{uploadId}){
+ $buf .= sprintf("?uploadId=%s",$query_params{uploadId});
+ }
}
return $buf;
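The effect of the signing change is easiest to see in the canonical resource itself; with hypothetical bucket, key and upload id values, the three multipart request types are signed against resources ending in:

  # PUT  .../massive_video.avi?partNumber=1&uploadId=XyZ123   (put_part)
  #   canonical resource: /mybucket/massive_video.avi?partNumber=1&uploadId=XyZ123
  # POST .../massive_video.avi?uploads                        (initiate)
  #   canonical resource: /mybucket/massive_video.avi?uploads
  # POST .../massive_video.avi?uploadId=XyZ123                (complete)
  #   canonical resource: /mybucket/massive_video.avi?uploadId=XyZ123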
84 lib/Net/Amazon/S3/Request/CompleteMultipartUpload.pm
@@ -0,0 +1,84 @@
+package Net::Amazon::S3::Request::CompleteMultipartUpload;
+use Moose 0.85;
+
+use Digest::MD5 qw/md5 md5_hex/;
+use MIME::Base64;
+use Carp qw/croak/;
+use XML::LibXML;
+
+extends 'Net::Amazon::S3::Request';
+
+has 'bucket' => ( is => 'ro', isa => 'BucketName', required => 1 );
+has 'etags' => ( is => 'ro', isa => 'ArrayRef', required => 1 );
+has 'key' => ( is => 'ro', isa => 'Str', required => 1 );
+has 'part_numbers' => ( is => 'ro', isa => 'ArrayRef', required => 1 );
+has 'upload_id' => ( is => 'ro', isa => 'Str', required => 1 );
+
+__PACKAGE__->meta->make_immutable;
+
+sub http_request {
+ my $self = shift;
+
+ croak "must have an equally sized list of etags and part numbers"
+ unless scalar(@{$self->part_numbers}) == scalar(@{$self->etags});
+
+ #build XML doc
+ my $xml_doc = XML::LibXML::Document->new('1.0','UTF-8');
+ my $root_element = $xml_doc->createElement('CompleteMultipartUpload');
+ $xml_doc->addChild($root_element);
+
+ #add content
+ for(my $i = 0; $i < scalar(@{$self->part_numbers}); $i++ ){
+ my $part = $xml_doc->createElement('Part');
+ $part->appendTextChild('PartNumber' => $self->part_numbers->[$i]);
+ $part->appendTextChild('ETag' => $self->etags->[$i]);
+ $root_element->addChild($part);
+ }
+
+ my $content = $xml_doc->toString;
+
+ my $md5 = md5($content);
+
+ my $md5_base64 = encode_base64($md5);
+ chomp $md5_base64;
+
+ my $header_spec = {
+ 'Content-MD5' => $md5_base64,
+ 'Content-Length' => length $content,
+ 'Content-Type' => 'application/xml'
+ };
+
+ #build signed request
+ return Net::Amazon::S3::HTTPRequest->new( # relies on the HTTPRequest.pm changes in this patch
+ s3 => $self->s3,
+ method => 'POST',
+ path => $self->_uri( $self->key ). '?uploadId='.$self->upload_id,
+ content => $content,
+ headers => $header_spec,
+ )->http_request;
+}
+
+1;
+
+__END__
+
+# ABSTRACT: An internal class to complete a multipart upload
+
+=head1 SYNOPSIS
+
+ my $http_request = Net::Amazon::S3::Request::CompleteMultipartUpload->new(
+ s3 => $s3,
+ bucket => $bucket,
+ etags => \@etags,
+ part_numbers => \@part_numbers,
+ )->http_request;
+
+=head1 DESCRIPTION
+
+This module completes a multipart upload.
+
+=head1 METHODS
+
+=head2 http_request
+
+This method returns a HTTP::Request object.
78 lib/Net/Amazon/S3/Request/DeleteMultiObject.pm
@@ -0,0 +1,78 @@
+package Net::Amazon::S3::Request::DeleteMultiObject;
+use Moose 0.85;
+
+use Digest::MD5 qw/md5 md5_hex/;
+use MIME::Base64;
+use Carp qw/croak/;
+use XML::LibXML;
+
+extends 'Net::Amazon::S3::Request';
+
+has 'bucket' => ( is => 'ro', isa => 'BucketName', required => 1 );
+has 'keys' => ( is => 'ro', isa => 'ArrayRef', required => 1 );
+
+__PACKAGE__->meta->make_immutable;
+
+sub http_request {
+ my $self = shift;
+
+ #croak if we get a request for over 1000 objects
+ croak "The maximum number of keys is 1000"
+ if (scalar(@{$self->keys}) > 1000);
+
+ #build XML doc
+ my $xml_doc = XML::LibXML::Document->new('1.0','UTF-8');
+ my $root_element = $xml_doc->createElement('Delete');
+ $xml_doc->addChild($root_element);
+ $root_element->appendTextChild('Quiet'=>'true');
+ #add content
+ foreach my $key (@{$self->keys}){
+ my $obj_element = $xml_doc->createElement('Object');
+ $obj_element->appendTextChild('Key' => $key);
+ $root_element->addChild($obj_element);
+ }
+
+ my $content = $xml_doc->toString;
+
+ my $md5 = md5($content);
+ my $md5_base64 = encode_base64($md5);
+ chomp $md5_base64;
+
+ my $header_spec = {
+ 'Content-MD5' => $md5_base64,
+ 'Content-Length' => length $content,
+ 'Content-Type' => 'application/xml'
+ };
+
+ #build signed request
+ return Net::Amazon::S3::HTTPRequest->new(
+ s3 => $self->s3,
+ method => 'POST',
+ path => $self->bucket . '/?delete',
+ content => $content,
+ headers => $header_spec,
+ )->http_request;
+}
+
+1;
+
+__END__
+
+# ABSTRACT: An internal class to delete multiple objects from a bucket
+
+=head1 SYNOPSIS
+
+ my $http_request = Net::Amazon::S3::Request::DeleteMultiObject->new(
+ s3 => $s3,
+ bucket => $bucket,
+ keys => [$key1, $key2],
+ )->http_request;
+
+=head1 DESCRIPTION
+
+This module deletes multiple objects from a bucket.
+
+=head1 METHODS
+
+=head2 http_request
+
+This method returns a HTTP::Request object.
53 lib/Net/Amazon/S3/Request/InitiateMultipartUpload.pm
@@ -0,0 +1,53 @@
+package Net::Amazon::S3::Request::InitiateMultipartUpload;
+
+use Moose 0.85;
+use MooseX::StrictConstructor 0.16;
+extends 'Net::Amazon::S3::Request';
+
+has 'bucket' => ( is => 'ro', isa => 'BucketName', required => 1 );
+has 'key' => ( is => 'ro', isa => 'Str', required => 1 );
+has 'acl_short' => ( is => 'ro', isa => 'Maybe[AclShort]', required => 0 );
+has 'headers' =>
+ ( is => 'ro', isa => 'HashRef', required => 0, default => sub { {} } );
+
+__PACKAGE__->meta->make_immutable;
+
+sub http_request {
+ my $self = shift;
+ my $headers = $self->headers;
+
+ if ( $self->acl_short ) {
+ $headers->{'x-amz-acl'} = $self->acl_short;
+ }
+
+ return Net::Amazon::S3::HTTPRequest->new(
+ s3 => $self->s3,
+ method => 'POST',
+ path => $self->_uri( $self->key ).'?uploads',
+ headers => $self->headers,
+ )->http_request;
+}
+
+1;
+
+__END__
+
+#ABSTRACT: An internal class to begin a multipart upload
+
+=head1 SYNOPSIS
+
+ my $http_request = Net::Amazon::S3::Request::InitiateMultipartUpload->new(
+ s3 => $s3,
+ bucket => $bucket,
+ key => $key,
+ )->http_request;
+
+=head1 DESCRIPTION
+
+This module begins a multipart upload.
+
+=head1 METHODS
+
+=head2 http_request
+
+This method returns a HTTP::Request object.
34 lib/Net/Amazon/S3/Request/ListParts.pm
@@ -0,0 +1,34 @@
+package Net::Amazon::S3::Request::ListParts;
+
+# ABSTRACT: List the parts in a multipart upload.
+
+use Moose 0.85;
+use MooseX::StrictConstructor 0.16;
+extends 'Net::Amazon::S3::Request';
+
+has 'bucket' => ( is => 'ro', isa => 'BucketName', required => 1 );
+has 'key' => ( is => 'ro', isa => 'Str', required => 1 );
+has 'upload_id' => ( is => 'ro', isa => 'Str', required => 1 );
+has 'acl_short' => ( is => 'ro', isa => 'Maybe[AclShort]', required => 0 );
+has 'headers' =>
+ ( is => 'ro', isa => 'HashRef', required => 0, default => sub { {} } );
+
+__PACKAGE__->meta->make_immutable;
+
+sub http_request {
+ my $self = shift;
+ my $headers = $self->headers;
+
+ if ( $self->acl_short ) {
+ $headers->{'x-amz-acl'} = $self->acl_short;
+ }
+
+ return Net::Amazon::S3::HTTPRequest->new(
+ s3 => $self->s3,
+ method => 'GET',
+ path => $self->_uri( $self->key ).'?uploadId='.$self->upload_id,
+ headers => $self->headers,
+ )->http_request;
+}
+
+1;
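Until the client-level list_parts stub grows a friendlier interface, a caller who needs the part listing has to drive this request class directly and parse the XML. A rough sketch using the same internal helpers the other Client methods rely on (the s3 XPath prefix is assumed to be registered by _send_request_xpc), and assuming the response follows S3's ListParts schema:

  # build and send a ListParts request, then read each part's
  # number and ETag out of the raw XML response
  my $http_request = Net::Amazon::S3::Request::ListParts->new(
      s3        => $client->s3,
      bucket    => $bucket->name,
      key       => $object->key,
      upload_id => $upload_id,
  )->http_request;

  my $xpc = $client->_send_request_xpc($http_request);
  for my $part ( $xpc->findnodes('//s3:Part') ) {
      my $number = $xpc->findvalue( './s3:PartNumber', $part );
      my $etag   = $xpc->findvalue( './s3:ETag',       $part );
      print "part $number => $etag\n";
  }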
72 lib/Net/Amazon/S3/Request/PutPart.pm
@@ -0,0 +1,72 @@
+package Net::Amazon::S3::Request::PutPart;
+use Moose 0.85;
+use MooseX::StrictConstructor 0.16;
+extends 'Net::Amazon::S3::Request';
+
+has 'bucket' => ( is => 'ro', isa => 'BucketName', required => 1 );
+has 'key' => ( is => 'ro', isa => 'Str', required => 1 );
+has 'value' => ( is => 'ro', isa => 'Str|CodeRef', required => 0 );
+has 'upload_id' => ( is => 'ro', isa => 'Str', required => 1 );
+has 'part_number' => ( is => 'ro', isa => 'Int', required => 1 );
+has 'copy_source_bucket' => ( is => 'ro', isa => 'Str', required => 0 );
+has 'copy_source_key' => ( is => 'ro', isa => 'Str', required => 0 );
+has 'acl_short' => ( is => 'ro', isa => 'Maybe[AclShort]', required => 0 );
+has 'headers' =>
+ ( is => 'ro', isa => 'HashRef', required => 0, default => sub { {} } );
+
+__PACKAGE__->meta->make_immutable;
+
+sub http_request {
+ my $self = shift;
+ my $headers = $self->headers;
+
+ if ( $self->acl_short ) {
+ $headers->{'x-amz-acl'} = $self->acl_short;
+ }
+
+ if(defined $self->copy_source_bucket && defined $self->copy_source_key){
+ $headers->{'x-amz-copy-source'} =
+ $self->copy_source_bucket.'/'.$self->copy_source_key;
+ }
+
+ return Net::Amazon::S3::HTTPRequest->new(
+ s3 => $self->s3,
+ method => 'PUT',
+ path => $self->_uri($self->key) .
+ '?partNumber=' .
+ $self->part_number .
+ '&uploadId=' .
+ $self->upload_id,
+ headers => $headers,
+ content => $self->value // '',
+ )->http_request;
+}
+
+1;
+
+__END__
+
+# ABSTRACT: An internal class to put part of a multipart upload
+
+=head1 SYNOPSIS
+
+ my $http_request = Net::Amazon::S3::Request::PutPart->new(
+ s3 => $s3,
+ bucket => $bucket,
+ key => $key,
+ value => $value,
+ acl_short => $acl_short,
+ headers => $conf,
+ part_number => $part_number,
+ upload_id => $upload_id,
+ )->http_request;
+
+=head1 DESCRIPTION
+
+This module puts a single part of a multipart upload.
+
+=head1 METHODS
+
+=head2 http_request
+
+This method returns a HTTP::Request object.
75 t/02client.t 100644 → 100755
@@ -7,11 +7,12 @@ use LWP::Simple;
use File::stat;
use Test::More;
use Test::Exception;
+use File::Temp qw/ :seekable /;
unless ( $ENV{'AMAZON_S3_EXPENSIVE_TESTS'} ) {
plan skip_all => 'Testing this module for real costs money.';
} else {
- plan tests => 38;
+ plan tests => 48;
}
use_ok('Net::Amazon::S3');
@@ -235,14 +236,10 @@ is( $objects[0]->size, $readme_size,
ok( $objects[0]->last_modified, 'newly created object has a last modified' );
# download an object with get_filename
-
-if ( -f 't/README' ) {
- unlink('t/README') || die $!;
-}
-
-$object->get_filename('t/README');
-is( stat('t/README')->size, $readme_size, 'download has right size' );
-is( file_md5_hex('t/README'), $readme_md5hex, 'download has right etag' );
+my $tmp_fh = File::Temp->new();
+$object->get_filename($tmp_fh->filename);
+is( stat($tmp_fh->filename)->size, $readme_size, 'download has right size' );
+is( file_md5_hex($tmp_fh->filename), $readme_md5hex, 'download has right etag' );
$object->delete;
@@ -258,5 +255,63 @@ is( length( get( $object->uri ) ),
$readme_size, 'newly uploaded public object has the right size' );
$object->delete;
-$bucket->delete;
+# upload an object using multipart upload
+$object = $bucket->object(
+ key => 'new multipart file',
+ acl_short => 'public-read'
+);
+
+my $upload_id;
+ok($upload_id = $object->initiate_multipart_upload, "can initiate a new multipart upload");
+
+#put part
+
+my $put_part_response;
+ok( $put_part_response = $object->put_part(part_number => 1, upload_id => $upload_id, value => 'x' x (5 * 1024 * 1024)), 'Got a successful response for PUT part' );
+my @etags;
+push @etags, $put_part_response->header('ETag');
+ok( $put_part_response = $object->put_part(part_number => 2, upload_id => $upload_id, value => 'z' x (1024 * 1024)), 'Got a successful response for 2nd PUT part' );
+push @etags, $put_part_response->header('ETag');
+
+# TODO list part? - We've got this, but how to expose it nicely?
+
+#complete multipart upload
+my $complete_upload_response;
+ok(
+ $complete_upload_response = $object->complete_multipart_upload( upload_id => $upload_id, part_numbers => [1,2], etags => \@etags),
+ "successful response for complete multipart upload"
+);
+#get the file and check that it looks like we expect
+ok($object->exists, "object has now been created");
+
+$tmp_fh = File::Temp->new();
+$object->get_filename($tmp_fh->filename);
+is( stat($tmp_fh->filename)->size, 6 * 1024 * 1024, "downloaded file has a size equivalent to the sum of its parts");
+
+$tmp_fh->seek((5 * 1024 * 1024) - 1, SEEK_SET);#jump to 5MB position
+my $test_bytes;
+read($tmp_fh, $test_bytes, 2);
+is($test_bytes, "xz", "The second chunk of the file begins in the correct place");
+
+$object->delete;
+
+#test multi-object delete
+#make 3 identical objects
+@objects =();
+for my $i(1..3){
+ my $bulk_object = $bucket->object(
+ key => "bulk-readme-$i",
+ etag => $readme_md5hex,
+ size => $readme_size
+ );
+ $bulk_object->put_filename('README');
+ push @objects, $bulk_object;
+}
+#now delete 2 of those objects
+ok($bucket->delete_multi_object(@objects[0..1]), "executed multi delete operation");
+ok( !grep($_->exists, @objects[0..1]), "target objects no longer exist");
+ok( $objects[2]->exists, "object not included in multi-object delete still exists" );
+$objects[2]->delete;
+
+$bucket->delete;