diff --git a/lib/Net/Amazon/S3.pm b/lib/Net/Amazon/S3.pm old mode 100644 new mode 100755 index 898e1d0a..fe35e580 --- a/lib/Net/Amazon/S3.pm +++ b/lib/Net/Amazon/S3.pm @@ -112,16 +112,21 @@ use Net::Amazon::S3::Client::Bucket; use Net::Amazon::S3::Client::Object; use Net::Amazon::S3::HTTPRequest; use Net::Amazon::S3::Request; +use Net::Amazon::S3::Request::CompleteMultipartUpload; use Net::Amazon::S3::Request::CreateBucket; use Net::Amazon::S3::Request::DeleteBucket; +use Net::Amazon::S3::Request::DeleteMultiObject; use Net::Amazon::S3::Request::DeleteObject; use Net::Amazon::S3::Request::GetBucketAccessControl; use Net::Amazon::S3::Request::GetBucketLocationConstraint; use Net::Amazon::S3::Request::GetObject; use Net::Amazon::S3::Request::GetObjectAccessControl; +use Net::Amazon::S3::Request::InitiateMultipartUpload; use Net::Amazon::S3::Request::ListAllMyBuckets; use Net::Amazon::S3::Request::ListBucket; +use Net::Amazon::S3::Request::ListParts; use Net::Amazon::S3::Request::PutObject; +use Net::Amazon::S3::Request::PutPart; use Net::Amazon::S3::Request::SetBucketAccessControl; use Net::Amazon::S3::Request::SetObjectAccessControl; use LWP::UserAgent::Determined; @@ -196,13 +201,13 @@ sub BUILD { if ( $self->retry ) { $ua = LWP::UserAgent::Determined->new( keep_alive => $KEEP_ALIVE_CACHESIZE, - requests_redirectable => [qw(GET HEAD DELETE PUT)], + requests_redirectable => [qw(GET HEAD DELETE PUT POST)], ); $ua->timing('1,2,4,8,16,32'); } else { $ua = LWP::UserAgent->new( keep_alive => $KEEP_ALIVE_CACHESIZE, - requests_redirectable => [qw(GET HEAD DELETE PUT)], + requests_redirectable => [qw(GET HEAD DELETE PUT POST)], ); } diff --git a/lib/Net/Amazon/S3/Client/Bucket.pm b/lib/Net/Amazon/S3/Client/Bucket.pm old mode 100644 new mode 100755 index 87f52276..d97b55ad --- a/lib/Net/Amazon/S3/Client/Bucket.pm +++ b/lib/Net/Amazon/S3/Client/Bucket.pm @@ -132,6 +132,18 @@ sub list { ); } +sub delete_multi_object { + my $self = shift; + my @objects = @_; + return unless( 
scalar(@objects) ); + my $http_request = Net::Amazon::S3::Request::DeleteMultiObject->new( + s3 => $self->client->s3, + bucket => $self->name, + keys => [ map($_->key, @objects) ], + )->http_request; + return $self->client->_send_request($http_request); +} + sub object { my ( $self, %conf ) = @_; return Net::Amazon::S3::Client::Object->new( @@ -141,6 +153,7 @@ sub object { ); } + 1; __END__ @@ -228,3 +241,10 @@ This module represents buckets. # be used to get or put my $object = $bucket->object( key => 'this is the key' ); +=head2 delete_multi_object + + # delete multiple objects using a multi object delete operation + # Accepts a list of L<Net::Amazon::S3::Client::Object> objects. + # Limited to a maximum of 1000 objects in one operation + $bucket->delete_multi_object($object1, $object2) + diff --git a/lib/Net/Amazon/S3/Client/Object.pm b/lib/Net/Amazon/S3/Client/Object.pm old mode 100644 new mode 100755 index 4c87669e..d38c58b1 --- a/lib/Net/Amazon/S3/Client/Object.pm +++ b/lib/Net/Amazon/S3/Client/Object.pm @@ -74,13 +74,9 @@ sub get { my $content = $http_response->content; my $md5_hex = md5_hex($content); + my $etag = $self->etag || $self->_etag($http_response); + confess 'Corrupted download' if( !$self->_is_multipart_etag($etag) && $etag ne $md5_hex); - if ( $self->etag ) { - confess 'Corrupted download' if $self->etag ne $md5_hex; - } else { - confess 'Corrupted download' - if $self->_etag($http_response) ne $md5_hex; - } return $content; } @@ -99,12 +95,8 @@ sub get_filename { my $md5_hex = file_md5_hex($filename); - if ( $self->etag ) { - confess 'Corrupted download' if $self->etag ne $md5_hex; - } else { - confess 'Corrupted download' - if $self->_etag($http_response) ne $md5_hex; - } + my $etag = $self->etag || $self->_etag($http_response); + confess 'Corrupted download' if( !$self->_is_multipart_etag($etag) && $etag ne $md5_hex); } sub put { @@ -210,6 +202,57 @@ sub delete { $self->client->_send_request($http_request); } +sub initiate_multipart_upload { + my $self = shift; + my
$http_request = Net::Amazon::S3::Request::InitiateMultipartUpload->new( + s3 => $self->client->s3, + bucket => $self->bucket->name, + key => $self->key, + )->http_request; + my $res = $self->client->_send_request($http_request); + return unless $res->is_success; + + my $doc = $self->client->s3->libxml->parse_string($res->content); + my $xpc = XML::LibXML::XPathContext->new($doc); + $xpc->registerNs( 's3', + 'http://s3.amazonaws.com/doc/2006-03-01/' ); + my $upload_id = $xpc->findvalue('//s3:UploadId'); + return $upload_id; +} + +sub complete_multipart_upload { + my $self = shift; + + my %args = ref($_[0]) ? %{$_[0]} : @_; + + #set default args + $args{s3} = $self->client->s3; + $args{key} = $self->key; + $args{bucket} = $self->bucket->name; + + my $http_request = Net::Amazon::S3::Request::CompleteMultipartUpload->new(%args)->http_request; + return $self->client->_send_request($http_request); +} + +sub put_part { + my $self = shift; + + my %args = ref($_[0]) ? %{$_[0]} : @_; + + #set default args + $args{s3} = $self->client->s3; + $args{key} = $self->key; + $args{bucket} = $self->bucket->name; + + my $http_request = Net::Amazon::S3::Request::PutPart->new(%args)->http_request; + return $self->client->_send_request($http_request); +} + +sub list_parts { + confess "Not implemented"; + #TODO - Net::Amazon::S3::Request:ListParts is implemented, but need to define best interface at this level. Currently returns raw XML +} + sub uri { my $self = shift; return Net::Amazon::S3::Request::GetObject->new( @@ -283,6 +326,11 @@ sub _etag { return $etag; } +sub _is_multipart_etag { + my ( $self, $etag ) = @_; + return 1 if($etag =~ /\-\d+$/); +} + 1; __END__ @@ -453,3 +501,34 @@ Content-Disposition using content_disposition.
# return the URI of a publically-accessible object my $uri = $object->uri; +=head2 initiate_multipart_upload + + #initiate a new multipart upload for this object + my $object = $bucket->object( + key => 'massive_video.avi' + ); + my $upload_id = $object->initiate_multipart_upload; + +=head2 put_part + + #add a part to a multipart upload + my $put_part_response = $object->put_part( + upload_id => $upload_id, + part_number => 1, + value => $chunk_content, + ); + my $part_etag = $put_part_response->header('ETag') + + Returns an L object. It is necessary to keep the ETags for each part, as these are required to complete the upload. + +=head2 complete_multipart_upload + + #complete a multipart upload + $object->complete_multipart_upload( + upload_id => $upload_id, + etags => [$etag_1, $etag_2], + part_numbers => [$part_number_1, $part_number2], + ); + + The etag and part_numbers parameters are ordered lists specifying the part numbers and ETags for each individual part of the multipart upload. 
+ diff --git a/lib/Net/Amazon/S3/HTTPRequest.pm b/lib/Net/Amazon/S3/HTTPRequest.pm old mode 100644 new mode 100755 index b5cace4a..88934003 --- a/lib/Net/Amazon/S3/HTTPRequest.pm +++ b/lib/Net/Amazon/S3/HTTPRequest.pm @@ -6,13 +6,14 @@ use MIME::Base64 qw( encode_base64 ); use Moose::Util::TypeConstraints; use URI::Escape qw( uri_escape_utf8 ); use URI::QueryParam; +use URI; # ABSTRACT: Create a signed HTTP::Request my $METADATA_PREFIX = 'x-amz-meta-'; my $AMAZON_HEADER_PREFIX = 'x-amz-'; -enum 'HTTPMethod' => qw(DELETE GET HEAD PUT); +enum 'HTTPMethod' => qw(DELETE GET HEAD PUT POST); has 's3' => ( is => 'ro', isa => 'Net::Amazon::S3', required => 1 ); has 'method' => ( is => 'ro', isa => 'HTTPMethod', required => 1 ); @@ -140,15 +141,43 @@ sub _canonical_string { $path =~ /^([^?]*)/; $buf .= "/$1"; - # ...unless there is an acl or torrent parameter - if ( $path =~ /[&?]acl($|=|&)/ ) { - $buf .= '?acl'; - } elsif ( $path =~ /[&?]torrent($|=|&)/ ) { - $buf .= '?torrent'; - } elsif ( $path =~ /[&?]location($|=|&)/ ) { - $buf .= '?location'; + # ...unless there any parameters we're interested in... 
+ if ( $path =~ /[&?](acl|torrent|location|uploads|delete)($|=|&)/ ) { + $buf .= "?$1"; + } elsif ( my %query_params = URI->new($path)->query_form ){ + #see if the remaining parsed query string provides us with any + if($query_params{partNumber} && $query_params{uploadId}){ + #re-evaluate query string, the order of the params is important for request signing, so we can't depend on URI to do the right thing + $buf .= sprintf("?partNumber=%s&uploadId=%s", $query_params{partNumber}, $query_params{uploadId}); + } + elsif($query_params{uploadId}){ + $buf .= sprintf("?uploadId=%s",$query_params{uploadId}); + } } + # action parameters + #check for allowed CGI params +# my $uri = URI->new($path); +# if(my $qs = $uri->query){ +# #sometimes the query string might composed only of a single key, which URI won't parse properl +# #if that's the case check that it's a valid one, and just bolt it on the end +# if($qs ~~ [qw/delete torrent uploads location/]){ +# $buf .= "?$qs"; +# } +# else { +# my %query_params = $uri->query_form; +# #strip out disallowed query params +# if($query_params{partNumber} && $query_params{uploadId}){ +# #re-evaluate query string, the order of the params is important, so we can't depend on URI +# $qs = sprintf("partNumber=%s&uploadId=%s", $query_params{partNumber}, $query_params{uploadId}); +# } +# elsif($query_params{uploadId}){ +# $qs = sprintf("uploadId=%s",$query_params{uploadId}); +# } +# $buf .= "?$qs" if($qs); +# } +# } + return $buf; } diff --git a/lib/Net/Amazon/S3/Request/CompleteMultipartUpload.pm b/lib/Net/Amazon/S3/Request/CompleteMultipartUpload.pm new file mode 100755 index 00000000..97aa790e --- /dev/null +++ b/lib/Net/Amazon/S3/Request/CompleteMultipartUpload.pm @@ -0,0 +1,84 @@ +package Net::Amazon::S3::Request::CompleteMultipartUpload; +use Moose 0.85; + +use Digest::MD5 qw/md5 md5_hex/; +use MIME::Base64; +use Carp qw/croak/; +use XML::LibXML; + +extends 'Net::Amazon::S3::Request'; + +has 'bucket' => ( is => 'ro', isa => 
'BucketName', required => 1 ); +has 'etags' => ( is => 'ro', isa => 'ArrayRef', required => 1 ); +has 'key' => ( is => 'ro', isa => 'Str', required => 1 ); +has 'part_numbers' => ( is => 'ro', isa => 'ArrayRef', required => 1 ); +has 'upload_id' => ( is => 'ro', isa => 'Str', required => 1 ); + +__PACKAGE__->meta->make_immutable; + +sub http_request { + my $self = shift; + + croak "must have an equally sized list of etags and part numbers" unless scalar(@{$self->part_numbers}) == scalar(@{$self->etags}); + #build XML doc + my $xml_doc = XML::LibXML::Document->new('1.0','UTF-8'); + my $root_element = $xml_doc->createElement('CompleteMultipartUpload'); + $xml_doc->addChild($root_element); + + #add content + for(my $i = 0; $i < scalar(@{$self->part_numbers}); $i++ ){ + my $part = $xml_doc->createElement('Part'); + $part->appendTextChild('PartNumber' => $self->part_numbers->[$i]); + $part->appendTextChild('ETag' => $self->etags->[$i]); + $root_element->addChild($part); + } + + my $content = $xml_doc->toString; + + my $md5 = md5($content); + + my $md5_base64 = encode_base64($md5); + chomp $md5_base64; + + my $header_spec = { + 'Content-MD5' => $md5_base64, + 'Content-Length' => length $content, + 'Content-Type' => 'application/xml' + }; + + #build signed request + return Net::Amazon::S3::HTTPRequest->new( #See patch below + s3 => $self->s3, + method => 'POST', + path => $self->_uri( $self->key ). '?uploadId='.$self->upload_id, + content => $content, + headers => $header_spec, + )->http_request; +} + +1; + +__END__ + +=head1 NAME + +Net::Amazon::S3::Request::CompleteMultipartUpload - An internal class to complete a multipart upload + +=head1 SYNOPSIS + + my $http_request = Net::Amazon::S3::Request::CompleteMultipartUpload->new( + s3 => $s3, + bucket => $bucket, + etags => \@etags, + part_numbers => \@part_numbers, + )->http_request; + +=head1 DESCRIPTION + +This module completes a multipart upload of an object.
+ +=head1 METHODS + +=head2 http_request + +This method returns a HTTP::Request object. \ No newline at end of file diff --git a/lib/Net/Amazon/S3/Request/DeleteMultiObject.pm b/lib/Net/Amazon/S3/Request/DeleteMultiObject.pm new file mode 100755 index 00000000..548b02aa --- /dev/null +++ b/lib/Net/Amazon/S3/Request/DeleteMultiObject.pm @@ -0,0 +1,80 @@ +package Net::Amazon::S3::Request::DeleteMultiObject; +use Moose 0.85; + +use Digest::MD5 qw/md5 md5_hex/; +use MIME::Base64; +use Carp qw/croak/; +use XML::LibXML; + +extends 'Net::Amazon::S3::Request'; + +has 'bucket' => ( is => 'ro', isa => 'BucketName', required => 1 ); +has 'keys' => ( is => 'ro', isa => 'ArrayRef', required => 1 ); + +__PACKAGE__->meta->make_immutable; + +sub http_request { + my $self = shift; + + #croak if we get a request for over 1000 objects + croak "The maximum number of keys is 1000" if(scalar(@{ $self->keys }) > 1000); + + #build XML doc + my $xml_doc = XML::LibXML::Document->new('1.0','UTF-8'); + my $root_element = $xml_doc->createElement('Delete'); + $xml_doc->addChild($root_element); + $root_element->appendTextChild('Quiet'=>'true'); + #add content + foreach my $key (@{$self->keys}){ + my $obj_element = $xml_doc->createElement('Object'); + $obj_element->appendTextChild('Key' => $key); + $root_element->addChild($obj_element); + } + + my $content = $xml_doc->toString; + + my $md5 = md5($content); + my $md5_base64 = encode_base64($md5); + chomp $md5_base64; + + my $header_spec = { + 'Content-MD5' => $md5_base64, + 'Content-Length' => length $content, + 'Content-Type' => 'application/xml' + }; + + #build signed request + return Net::Amazon::S3::HTTPRequest->new( + s3 => $self->s3, + method => 'POST', + path => $self->bucket . 
'/?delete', + content => $content, + headers => $header_spec, + )->http_request; +} + +1; + +__END__ + +=head1 NAME + +Net::Amazon::S3::Request::DeleteMultiObject - An internal class to delete multiple objects from a bucket + +=head1 SYNOPSIS + + my $http_request = Net::Amazon::S3::Request::DeleteMultiObject->new( + s3 => $s3, + bucket => $bucket, + keys => [$key1, $key2], + )->http_request; + +=head1 DESCRIPTION + +This module deletes multiple objects from a bucket. + +=head1 METHODS + +=head2 http_request + +This method returns a HTTP::Request object. \ No newline at end of file diff --git a/lib/Net/Amazon/S3/Request/InitiateMultipartUpload.pm b/lib/Net/Amazon/S3/Request/InitiateMultipartUpload.pm new file mode 100755 index 00000000..a6165520 --- /dev/null +++ b/lib/Net/Amazon/S3/Request/InitiateMultipartUpload.pm @@ -0,0 +1,55 @@ +package Net::Amazon::S3::Request::InitiateMultipartUpload; + +use Moose 0.85; +use MooseX::StrictConstructor 0.16; +extends 'Net::Amazon::S3::Request'; + +has 'bucket' => ( is => 'ro', isa => 'BucketName', required => 1 ); +has 'key' => ( is => 'ro', isa => 'Str', required => 1 ); +has 'acl_short' => ( is => 'ro', isa => 'Maybe[AclShort]', required => 0 ); +has 'headers' => + ( is => 'ro', isa => 'HashRef', required => 0, default => sub { {} } ); + +__PACKAGE__->meta->make_immutable; + +sub http_request { + my $self = shift; + my $headers = $self->headers; + + if ( $self->acl_short ) { + $headers->{'x-amz-acl'} = $self->acl_short; + } + + return Net::Amazon::S3::HTTPRequest->new( + s3 => $self->s3, + method => 'POST', + path => $self->_uri( $self->key ).'?uploads', + headers => $self->headers, + )->http_request; +} + +1; + +__END__ + +=head1 NAME + +Net::Amazon::S3::Request::InitiateMultipartUpload - An internal class to begin a multipart upload + +=head1 SYNOPSIS + + my $http_request = Net::Amazon::S3::Request::InitiateMultipartUpload->new( + s3 => $s3, + bucket => $bucket, + key => $key, + )->http_request; + +=head1 DESCRIPTION + 
+This module begins a multipart upload + +=head1 METHODS + +=head2 http_request + +This method returns a HTTP::Request object. \ No newline at end of file diff --git a/lib/Net/Amazon/S3/Request/ListParts.pm b/lib/Net/Amazon/S3/Request/ListParts.pm new file mode 100755 index 00000000..5c562042 --- /dev/null +++ b/lib/Net/Amazon/S3/Request/ListParts.pm @@ -0,0 +1,32 @@ +package Net::Amazon::S3::Request::ListParts; + +use Moose 0.85; +use MooseX::StrictConstructor 0.16; +extends 'Net::Amazon::S3::Request'; + +has 'bucket' => ( is => 'ro', isa => 'BucketName', required => 1 ); +has 'key' => ( is => 'ro', isa => 'Str', required => 1 ); +has 'upload_id' => ( is => 'ro', isa => 'Str', required => 1 ); +has 'acl_short' => ( is => 'ro', isa => 'Maybe[AclShort]', required => 0 ); +has 'headers' => + ( is => 'ro', isa => 'HashRef', required => 0, default => sub { {} } ); + +__PACKAGE__->meta->make_immutable; + +sub http_request { + my $self = shift; + my $headers = $self->headers; + + if ( $self->acl_short ) { + $headers->{'x-amz-acl'} = $self->acl_short; + } + + return Net::Amazon::S3::HTTPRequest->new( + s3 => $self->s3, + method => 'GET', + path => $self->_uri( $self->key ).'?uploadId='.$self->upload_id, + headers => $self->headers, + )->http_request; +} + +1; diff --git a/lib/Net/Amazon/S3/Request/PutPart.pm b/lib/Net/Amazon/S3/Request/PutPart.pm new file mode 100755 index 00000000..0b1c96af --- /dev/null +++ b/lib/Net/Amazon/S3/Request/PutPart.pm @@ -0,0 +1,70 @@ +package Net::Amazon::S3::Request::PutPart; +use Moose 0.85; +use MooseX::StrictConstructor 0.16; +extends 'Net::Amazon::S3::Request'; + +has 'bucket' => ( is => 'ro', isa => 'BucketName', required => 1 ); +has 'key' => ( is => 'ro', isa => 'Str', required => 1 ); +has 'value' => ( is => 'ro', isa => 'Str|CodeRef', required => 0 ); +has 'upload_id' => ( is => 'ro', isa => 'Str', required => 1 ); +has 'part_number' => ( is => 'ro', isa => 'Int', required => 1 ); +has 'copy_source_bucket' => ( is => 'ro', isa => 
'Str', required => 0 ); +has 'copy_source_key' => ( is => 'ro', isa => 'Str', required => 0 ); +has 'acl_short' => ( is => 'ro', isa => 'Maybe[AclShort]', required => 0 ); +has 'headers' => + ( is => 'ro', isa => 'HashRef', required => 0, default => sub { {} } ); + +__PACKAGE__->meta->make_immutable; + +sub http_request { + my $self = shift; + my $headers = $self->headers; + + if ( $self->acl_short ) { + $headers->{'x-amz-acl'} = $self->acl_short; + } + + $headers->{'Content-Length'} = length $self->value if(defined $self->value); + if($self->copy_source_bucket && $self->copy_source_key){ + $headers->{'x-amz-copy-source'} = $self->copy_source_bucket.'/'.$self->copy_source_key; + } + + return Net::Amazon::S3::HTTPRequest->new( + s3 => $self->s3, + method => 'PUT', + path => $self->_uri( $self->key ).'?partNumber='.$self->part_number.'&uploadId='.$self->upload_id, + headers => $headers, + content => $self->value // '', + )->http_request; +} + +1; + +__END__ + +=head1 NAME + +Net::Amazon::S3::Request::PutPart - An internal class to put part of a multipart upload + +=head1 SYNOPSIS + + my $http_request = Net::Amazon::S3::Request::PutPart->new( + s3 => $s3, + bucket => $bucket, + key => $key, + value => $value, + acl_short => $acl_short, + headers => $conf, + part_number => $part_number + upload_id => $upload_id + )->http_request; + +=head1 DESCRIPTION + +This module puts an object. + +=head1 METHODS + +=head2 http_request + +This method returns a HTTP::Request object. 
\ No newline at end of file diff --git a/t/02client.t b/t/02client.t old mode 100644 new mode 100755 index 8895b13e..92dc3c78 --- a/t/02client.t +++ b/t/02client.t @@ -7,11 +7,12 @@ use LWP::Simple; use File::stat; use Test::More; use Test::Exception; +use Fcntl qw(:seek); unless ( $ENV{'AMAZON_S3_EXPENSIVE_TESTS'} ) { plan skip_all => 'Testing this module for real costs money.'; } else { - plan tests => 38; + plan tests => 48; } use_ok('Net::Amazon::S3'); @@ -258,5 +259,68 @@ is( length( get( $object->uri ) ), $readme_size, 'newly uploaded public object has the right size' ); $object->delete; -$bucket->delete; +# upload an object using multipart upload +$object = $bucket->object( + key => 'new multipart file', + acl_short => 'public-read' +); + +my $upload_id; +ok($upload_id = $object->initiate_multipart_upload, "can initiate a new multipart upload"); + +#put part + +my $put_part_response; +ok( $put_part_response = $object->put_part(part_number => 1, upload_id => $upload_id, value => 'x' x (5 * 1024 * 1024)), 'Got a successful response for PUT part' ); +my @etags; +push @etags, $put_part_response->header('ETag'); +ok( $put_part_response = $object->put_part(part_number => 2, upload_id => $upload_id, value => 'z' x (1024 * 1024)), 'Got a successful response for 2nd PUT part' ); +push @etags, $put_part_response->header('ETag'); + +# TODO list part? - We've got this, but how to expose it nicely? 
+ +#complete multipart upload +my $complete_upload_response; +ok( + $complete_upload_response = $object->complete_multipart_upload( upload_id => $upload_id, part_numbers => [1,2], etags => \@etags), + "successful response for complete multipart upload" +); +#get the file and check that it looks like we expect +ok($object->exists, "object has now been created"); + +if ( -f 't/multipart-test' ) { + unlink('t/multipart-test') || die $!; +} +$object->get_filename('t/multipart-test'); +is( stat('t/multipart-test')->size, 6 * 1024 * 1024, "downloaded file has a size equivalent to the sum of it's parts"); + +open(my $test_fh, '<', 't/multipart-test'); +seek($test_fh, (5 * 1024 * 1024) - 1, SEEK_SET); #jump to 5MB position +my $test_bytes; +read($test_fh, $test_bytes, 2); +is($test_bytes, "xz", "The second chunk of the file begins in the correct place"); +close $test_fh; + +unlink('t/multipart-test') || die $!; +$object->delete; + +#test multi-object delete +#make 3 identical objects +my @objects; +for my $i(1..3){ + my $bulk_object = $bucket->object( + key => "bulk-readme-$i", + etag => $readme_md5hex, + size => $readme_size + ); + $bulk_object->put_filename('README'); + push @objects, $bulk_object; +} +#now delete 2 of those objects +ok($bucket->delete_multi_object(@objects[0..1]), "executed multi delete operation"); +ok( !grep($_->exists, @objects[0..1]), "target objects no longer exist"); +ok( $objects[2]->exists, "object not included in multi-object delete still exists" ); +$objects[2]->delete; + +$bucket->delete;