From d390edeea995328e71f92dd3f380cd1a890c13d5 Mon Sep 17 00:00:00 2001 From: Marcel Molina Date: Fri, 6 Jun 2008 02:55:18 -0500 Subject: [PATCH] Moving aws-s3 into git --- CHANGELOG | 67 ++ COPYING | 19 + INSTALL | 55 ++ README | 545 +++++++++++++++ README.erb | 58 ++ Rakefile | 328 +++++++++ TODO | 26 + bin/s3sh | 6 + bin/setup.rb | 10 + lib/aws/s3.rb | 61 ++ lib/aws/s3/acl.rb | 636 ++++++++++++++++++ lib/aws/s3/authentication.rb | 218 ++++++ lib/aws/s3/base.rb | 232 +++++++ lib/aws/s3/bittorrent.rb | 58 ++ lib/aws/s3/bucket.rb | 320 +++++++++ lib/aws/s3/connection.rb | 314 +++++++++ lib/aws/s3/error.rb | 69 ++ lib/aws/s3/exceptions.rb | 133 ++++ lib/aws/s3/extensions.rb | 323 +++++++++ lib/aws/s3/logging.rb | 306 +++++++++ lib/aws/s3/object.rb | 610 +++++++++++++++++ lib/aws/s3/owner.rb | 44 ++ lib/aws/s3/parsing.rb | 99 +++ lib/aws/s3/response.rb | 180 +++++ lib/aws/s3/service.rb | 51 ++ lib/aws/s3/version.rb | 12 + site/index.erb | 41 ++ site/public/images/box-and-gem.gif | Bin 0 -> 7201 bytes site/public/images/favicon.ico | Bin 0 -> 1406 bytes site/public/ruby.css | 18 + site/public/screen.css | 99 +++ support/faster-xml-simple/COPYING | 18 + support/faster-xml-simple/README | 8 + support/faster-xml-simple/Rakefile | 54 ++ .../lib/faster_xml_simple.rb | 187 +++++ .../test/fixtures/test-1.rails.yml | 4 + .../test/fixtures/test-1.xml | 3 + .../test/fixtures/test-1.yml | 4 + .../test/fixtures/test-2.rails.yml | 6 + .../test/fixtures/test-2.xml | 3 + .../test/fixtures/test-2.yml | 6 + .../test/fixtures/test-3.rails.yml | 6 + .../test/fixtures/test-3.xml | 5 + .../test/fixtures/test-3.yml | 6 + .../test/fixtures/test-4.rails.yml | 5 + .../test/fixtures/test-4.xml | 7 + .../test/fixtures/test-4.yml | 5 + .../test/fixtures/test-5.rails.yml | 8 + .../test/fixtures/test-5.xml | 7 + .../test/fixtures/test-5.yml | 8 + .../test/fixtures/test-6.rails.yml | 43 ++ .../test/fixtures/test-6.xml | 29 + .../test/fixtures/test-6.yml | 41 ++ .../test/fixtures/test-7.rails.yml | 23 + .../test/fixtures/test-7.xml | 22 + .../test/fixtures/test-7.yml | 22 + .../test/fixtures/test-8.rails.yml | 14 + .../test/fixtures/test-8.xml | 8 + .../test/fixtures/test-8.yml | 11 + .../faster-xml-simple/test/regression_test.rb | 47 ++ support/faster-xml-simple/test/test_helper.rb | 17 + .../test/xml_simple_comparison_test.rb | 46 ++ support/rdoc/code_info.rb | 211 ++++++ test/acl_test.rb | 254 +++++++ test/authentication_test.rb | 96 +++ test/base_test.rb | 143 ++++ test/bucket_test.rb | 48 ++ test/connection_test.rb | 190 ++++++ test/error_test.rb | 75 +++ test/extensions_test.rb | 331 +++++++++ test/fixtures.rb | 89 +++ test/fixtures/buckets.yml | 102 +++ test/fixtures/errors.yml | 34 + test/fixtures/headers.yml | 3 + test/fixtures/logging.yml | 15 + test/fixtures/loglines.yml | 5 + test/fixtures/logs.yml | 7 + test/fixtures/policies.yml | 16 + test/logging_test.rb | 89 +++ test/mocks/base.rb | 89 +++ test/object_test.rb | 217 ++++++ test/parsing_test.rb | 66 ++ test/remote/acl_test.rb | 117 ++++ test/remote/bittorrent_test.rb | 45 ++ test/remote/bucket_test.rb | 146 ++++ test/remote/logging_test.rb | 82 +++ test/remote/object_test.rb | 371 ++++++++++ test/remote/test_file.data | Bin 0 -> 60673 bytes test/remote/test_helper.rb | 30 + test/response_test.rb | 70 ++ test/service_test.rb | 26 + test/test_helper.rb | 86 +++ 92 files changed, 8664 insertions(+) create mode 100644 CHANGELOG create mode 100644 COPYING create mode 100644 INSTALL create mode 100644 README create mode 100644 README.erb create mode 100644 Rakefile 
create mode 100644 TODO create mode 100644 bin/s3sh create mode 100644 bin/setup.rb create mode 100644 lib/aws/s3.rb create mode 100644 lib/aws/s3/acl.rb create mode 100644 lib/aws/s3/authentication.rb create mode 100644 lib/aws/s3/base.rb create mode 100644 lib/aws/s3/bittorrent.rb create mode 100644 lib/aws/s3/bucket.rb create mode 100644 lib/aws/s3/connection.rb create mode 100644 lib/aws/s3/error.rb create mode 100644 lib/aws/s3/exceptions.rb create mode 100644 lib/aws/s3/extensions.rb create mode 100644 lib/aws/s3/logging.rb create mode 100644 lib/aws/s3/object.rb create mode 100644 lib/aws/s3/owner.rb create mode 100644 lib/aws/s3/parsing.rb create mode 100644 lib/aws/s3/response.rb create mode 100644 lib/aws/s3/service.rb create mode 100644 lib/aws/s3/version.rb create mode 100644 site/index.erb create mode 100644 site/public/images/box-and-gem.gif create mode 100644 site/public/images/favicon.ico create mode 100644 site/public/ruby.css create mode 100644 site/public/screen.css create mode 100644 support/faster-xml-simple/COPYING create mode 100644 support/faster-xml-simple/README create mode 100644 support/faster-xml-simple/Rakefile create mode 100644 support/faster-xml-simple/lib/faster_xml_simple.rb create mode 100644 support/faster-xml-simple/test/fixtures/test-1.rails.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-1.xml create mode 100644 support/faster-xml-simple/test/fixtures/test-1.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-2.rails.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-2.xml create mode 100644 support/faster-xml-simple/test/fixtures/test-2.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-3.rails.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-3.xml create mode 100644 support/faster-xml-simple/test/fixtures/test-3.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-4.rails.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-4.xml create mode 100644 support/faster-xml-simple/test/fixtures/test-4.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-5.rails.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-5.xml create mode 100644 support/faster-xml-simple/test/fixtures/test-5.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-6.rails.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-6.xml create mode 100644 support/faster-xml-simple/test/fixtures/test-6.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-7.rails.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-7.xml create mode 100644 support/faster-xml-simple/test/fixtures/test-7.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-8.rails.yml create mode 100644 support/faster-xml-simple/test/fixtures/test-8.xml create mode 100644 support/faster-xml-simple/test/fixtures/test-8.yml create mode 100644 support/faster-xml-simple/test/regression_test.rb create mode 100644 support/faster-xml-simple/test/test_helper.rb create mode 100644 support/faster-xml-simple/test/xml_simple_comparison_test.rb create mode 100644 support/rdoc/code_info.rb create mode 100644 test/acl_test.rb create mode 100644 test/authentication_test.rb create mode 100644 test/base_test.rb create mode 100644 test/bucket_test.rb create mode 100644 test/connection_test.rb create mode 100644 test/error_test.rb create mode 100644 test/extensions_test.rb create mode 100644 test/fixtures.rb create mode 100644 
test/fixtures/buckets.yml create mode 100644 test/fixtures/errors.yml create mode 100644 test/fixtures/headers.yml create mode 100644 test/fixtures/logging.yml create mode 100644 test/fixtures/loglines.yml create mode 100644 test/fixtures/logs.yml create mode 100644 test/fixtures/policies.yml create mode 100644 test/logging_test.rb create mode 100644 test/mocks/base.rb create mode 100644 test/object_test.rb create mode 100644 test/parsing_test.rb create mode 100644 test/remote/acl_test.rb create mode 100644 test/remote/bittorrent_test.rb create mode 100644 test/remote/bucket_test.rb create mode 100644 test/remote/logging_test.rb create mode 100644 test/remote/object_test.rb create mode 100644 test/remote/test_file.data create mode 100644 test/remote/test_helper.rb create mode 100644 test/response_test.rb create mode 100644 test/service_test.rb create mode 100644 test/test_helper.rb diff --git a/CHANGELOG b/CHANGELOG new file mode 100644 index 0000000..e8867a2 --- /dev/null +++ b/CHANGELOG @@ -0,0 +1,67 @@ +trunk: + +0.4.0: + +- Various adjustments to connection handling to try to mitigate exceptions raised from deep within Net::HTTP. + +- Don't coerce numbers that start with a zero because the zero will be lost. If a bucket, for example, has a name like '0815', all operation it will fail. Closes ticket #10089 [reported anonymously]" + +- Add ability to connect through a proxy using the :proxy option when establishing a connection. Suggested by [Simon Horne ] + +- Add :authenticated option to url_for. When passing false, don't generate signature parameters for query string. + +- Make url_for accept custom port settings. [Rich Olson] + +0.3.0: + +- Ensure content type is eventually set to account for changes made to Net::HTTP in Ruby version 1.8.5. Reported by [David Hanson, Stephen Caudill, Tom Mornini ] + +- Add :persistent option to connections which keeps a persistent connection rather than creating a new one per request, defaulting to true. Based on a patch by [Metalhead ] + +- If we are retrying a request after rescuing one of the retry exceptions, rewind the body if its an IO stream so it starts at the beginning. [Jamis Buck] + +- Ensure that all paths being submitted to S3 are valid utf8. If they are not, we remove the extended characters. Ample help from [Jamis Buck] + +- Wrap logs in Log objects which exposes each line as a Log::Line that has accessors by name for each field. + +- Various performance optimizations for the extensions code. [Roman LE NEGRATE ] + +- Make S3Object.copy more efficient by streaming in both directions in parallel. + +- Open up Net:HTTPGenericRequest to make the chunk size 1 megabyte, up from 1 kilobyte. + +- Add S3Object.exists? + +0.2.1: + +- When the bucket name argument (for e.g. Bucket.objects) is being used as the option hash, reassign it to the options variable and set the bucket to nil so bucket inference + options works. + +- Don't call CGI.escape on query string parameters in Hash#to_query_string since all paths get passed through URI.escape right before the request is made. Paths were getting double escaped. Bug spotted by [David Hanson] + +- Make s3sh exec irb.bat if on Windows. Bug spotted by [N. Sathish Kumar ] + +- Avoid class_variable_(get|set) since it was only recently added to Ruby. Spotted by [N. Sathish Kumar ] + +- Raise NoSuchKey if S3Object.about requests a key that does not exist. + +- If the response body is an empty string, don't try to parse it as xml. 
+ +- Don't reject every body type save for IO and String at the door when making a request. Suggested by [Alex MacCaw ] + +- Allow dots in bucket names. [Jesse Newland] + +0.2.0: + +- Infer content type for an object when calling S3Object.store without explicitly passing in the :content_type option. + +0.1.2: + +- Scrap (overly) fancy generator based version of CoercibleString with a much simpler and clearer case statement. Continuations are really slow and the specific use of the generator was leaking memory. Bug spotted by [Remco van't Veer] + +0.1.1: + +- Don't add the underscore method to String if it is already defined (like, for example, from ActiveSupport). Bug spotted by [Matt White ] + +0.1.0: + +- Initial public release diff --git a/COPYING b/COPYING new file mode 100644 index 0000000..e0fb232 --- /dev/null +++ b/COPYING @@ -0,0 +1,19 @@ +# +# Copyright (c) 2006-2007 Marcel Molina Jr. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of +# this software and associated documentation files (the "Software"), to deal in the +# Software without restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the +# Software, and to permit persons to whom the Software is furnished to do so, +# subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/INSTALL b/INSTALL new file mode 100644 index 0000000..d36a8a0 --- /dev/null +++ b/INSTALL @@ -0,0 +1,55 @@ +== Rubygems + +The easiest way to install aws/s3 is with Rubygems: + + % sudo gem i aws-s3 -ry + +== Directly from svn + + % svn co svn://rubyforge.org/var/svn/amazon/s3/trunk aws + +== As a Rails plugin + +If you want to use aws/s3 with a Rails application, you can export the repository +into your plugins directory and then check it in: + + % cd my-rails-application/vendor/plugins + % svn export svn://rubyforge.org/var/svn/amazon/s3/trunk aws + % svn add aws + +Or you could pull it down with an svn:externals: + + % cd my-rails-application/vendor/plugins + % svn propedit svn:externals . + +Then add the following line, save and exit: + + aws svn://rubyforge.org/var/svn/amazon/s3/trunk + +If you go the svn route, be sure that you have all the dependencies installed. The list of dependencies follow. + +== Dependencies + +AWS::S3 requires Ruby 1.8.4 or greater. + +It also has the following dependencies: + + sudo gem i xml-simple -ry + sudo gem i builder -ry + sudo gem i mime-types -ry + +=== XML parsing (xml-simple) + +AWS::S3 depends on XmlSimple (http://xml-simple.rubyforge.org/). When installing aws/s3 with +Rubygems, this dependency will be taken care of for you. Otherwise, installation instructions are listed on the xml-simple +site. + +If your system has the Ruby libxml bindings installed (http://libxml.rubyforge.org/) they will be used instead of REXML (which is what XmlSimple uses). 
For those concerned with speed and efficiency, it would behoove you to install libxml (instructions here: http://libxml.rubyforge.org/install.html) as it is considerably faster and less expensive than REXML. + +=== XML generation (builder) + +AWS::S3 also depends on the Builder library (http://builder.rubyforge.org/ and http://rubyforge.org/projects/builder/). This will also automatically be installed for you when using Rubygems. + +=== Content type inference (mime-types) + +AWS::S3 depends on the MIME::Types library (http://mime-types.rubyforge.org/) to infer the content type of an object that does not explicitly specify it. This library will automatically be installed for you when using Rubygems. \ No newline at end of file diff --git a/README b/README new file mode 100644 index 0000000..9e791a5 --- /dev/null +++ b/README @@ -0,0 +1,545 @@ += AWS::S3 + +AWS::S3 is a Ruby library for Amazon's Simple Storage Service's REST API (http://aws.amazon.com/s3). +Full documentation of the currently supported API can be found at http://docs.amazonwebservices.com/AmazonS3/2006-03-01. + +== Getting started + +To get started you need to require 'aws/s3': + + % irb -rubygems + irb(main):001:0> require 'aws/s3' + # => true + +The AWS::S3 library ships with an interactive shell called s3sh. From within it, you have access to all the operations the library exposes from the command line. + + % s3sh + >> Version + +Before you can do anything, you must establish a connection using Base.establish_connection!. A basic connection would look something like this: + + AWS::S3::Base.establish_connection!( + :access_key_id => 'abc', + :secret_access_key => '123' + ) + +The minimum connection options that you must specify are your access key id and your secret access key. + +(If you don't already have your access keys, all you need to sign up for the S3 service is an account at Amazon. You can sign up for S3 and get access keys by visiting http://aws.amazon.com/s3.) + +For convenience, if you set two special environment variables with the value of your access keys, the console will automatically create a default connection for you. For example: + + % cat .amazon_keys + export AMAZON_ACCESS_KEY_ID='abcdefghijklmnop' + export AMAZON_SECRET_ACCESS_KEY='1234567891012345' + +Then load it in your shell's rc file. + + % cat .zshrc + if [[ -f "$HOME/.amazon_keys" ]]; then + source "$HOME/.amazon_keys"; + fi + +See more connection details at AWS::S3::Connection::Management::ClassMethods. + + +== AWS::S3 Basics +=== The service, buckets and objects + +The three main concepts of S3 are the service, buckets and objects. + +==== The service + +The service lets you find out general information about your account, like what buckets you have. + + Service.buckets + # => [] + + +==== Buckets + +Buckets are containers for objects (the files you store on S3). To create a new bucket you just specify its name. + + # Pick a unique name, or else you'll get an error + # if the name is already taken. + Bucket.create('jukebox') + +Bucket names must be unique across the entire S3 system, sort of like domain names across the internet. If you try +to create a bucket with a name that is already taken, you will get an error. + +Assuming the name you chose isn't already taken, your new bucket will now appear in the bucket list: + + Service.buckets + # => [#"jukebox"}>] + +Once you have succesfully created a bucket you can you can fetch it by name using Bucket.find. 
+ + music_bucket = Bucket.find('jukebox') + +The bucket that is returned will contain a listing of all the objects in the bucket. + + music_bucket.objects.size + # => 0 + +If all you are interested in is the objects of the bucket, you can get to them directly using Bucket.objects. + + Bucket.objects('jukebox').size + # => 0 + +By default all objects will be returned, though there are several options you can use to limit what is returned, such as +specifying that only objects whose name is after a certain place in the alphabet be returned, and etc. Details about these options can +be found in the documentation for Bucket.find. + +To add an object to a bucket you specify the name of the object, its value, and the bucket to put it in. + + file = 'black-flowers.mp3' + S3Object.store(file, open(file), 'jukebox') + +You'll see your file has been added to it: + + music_bucket.objects + # => [#] + +You can treat your bucket like a hash and access objects by name: + + jukebox['black-flowers.mp3'] + # => # + +In the event that you want to delete a bucket, you can use Bucket.delete. + + Bucket.delete('jukebox') + +Keep in mind, like unix directories, you can not delete a bucket unless it is empty. Trying to delete a bucket +that contains objects will raise a BucketNotEmpty exception. + +Passing the :force => true option to delete will take care of deleting all the bucket's objects for you. + + Bucket.delete('photos', :force => true) + # => true + + +==== Objects + +S3Objects represent the data you store on S3. They have a key (their name) and a value (their data). All objects belong to a +bucket. + +You can store an object on S3 by specifying a key, its data and the name of the bucket you want to put it in: + + S3Object.store('me.jpg', open('headshot.jpg'), 'photos') + +The content type of the object will be inferred by its extension. If the appropriate content type can not be inferred, S3 defaults +to binary/octect-stream. + +If you want to override this, you can explicitly indicate what content type the object should have with the :content_type option: + + file = 'black-flowers.m4a' + S3Object.store( + file, + open(file), + 'jukebox', + :content_type => 'audio/mp4a-latm' + ) + +You can read more about storing files on S3 in the documentation for S3Object.store. + +If you just want to fetch an object you've stored on S3, you just specify its name and its bucket: + + picture = S3Object.find 'headshot.jpg', 'photos' + +N.B. The actual data for the file is not downloaded in both the example where the file appeared in the bucket and when fetched directly. +You get the data for the file like this: + + picture.value + +You can fetch just the object's data directly: + + S3Object.value 'headshot.jpg', 'photos' + +Or stream it by passing a block to stream: + + open('song.mp3', 'w') do |file| + S3Object.stream('song.mp3', 'jukebox') do |chunk| + file.write chunk + end + end + +The data of the file, once download, is cached, so subsequent calls to value won't redownload the file unless you +tell the object to reload its value: + + # Redownloads the file's data + song.value(:reload) + +Other functionality includes: + + # Check if an object exists? + S3Object.exists? 
'headshot.jpg', 'photos' + + # Copying an object + S3Object.copy 'headshot.jpg', 'headshot2.jpg', 'photos' + + # Renaming an object + S3Object.rename 'headshot.jpg', 'portrait.jpg', 'photos' + + # Deleting an object + S3Object.delete 'headshot.jpg', 'photos' + +==== More about objects and their metadata + +You can find out the content type of your object with the content_type method: + + song.content_type + # => "audio/mpeg" + +You can change the content type as well if you like: + + song.content_type = 'application/pdf' + song.store + +(Keep in mind that due to limitations in S3's exposed API, the only way to change things like the content_type +is to PUT the object onto S3 again. In the case of large files, this will result in fully re-uploading the file.) + +A bevy of information about an object can be had using the about method: + + pp song.about + {"last-modified" => "Sat, 28 Oct 2006 21:29:26 GMT", + "content-type" => "binary/octet-stream", + "etag" => "\"dc629038ffc674bee6f62eb64ff3a\"", + "date" => "Sat, 28 Oct 2006 21:30:41 GMT", + "x-amz-request-id" => "B7BC68F55495B1C8", + "server" => "AmazonS3", + "content-length" => "3418766"} + +You can get and set metadata for an object: + + song.metadata + # => {} + song.metadata[:album] = "A River Ain't Too Much To Love" + # => "A River Ain't Too Much To Love" + song.metadata[:released] = 2005 + pp song.metadata + {"x-amz-meta-released" => 2005, + "x-amz-meta-album" => "A River Ain't Too Much To Love"} + song.store + +That metadata will be saved in S3 and is henceforth available from that object: + + song = S3Object.find('black-flowers.mp3', 'jukebox') + pp song.metadata + {"x-amz-meta-released" => "2005", + "x-amz-meta-album" => "A River Ain't Too Much To Love"} + song.metadata[:released] + # => "2005" + song.metadata[:released] = 2006 + pp song.metadata + {"x-amz-meta-released" => 2006, + "x-amz-meta-album" => "A River Ain't Too Much To Love"} + + +==== Streaming uploads + +When storing an object on the S3 servers using S3Object.store, the data argument can be a string or an I/O stream. +If data is an I/O stream it will be read in segments and written to the socket incrementally. This approach +may be desirable for very large files so they are not read into memory all at once. + + # Non streamed upload + S3Object.store('greeting.txt', 'hello world!', 'marcel') + + # Streamed upload + S3Object.store('roots.mpeg', open('roots.mpeg'), 'marcel') + + +== Setting the current bucket +==== Scoping operations to a specific bucket + +If you plan on always using a specific bucket for certain files, you can skip always having to specify the bucket by creating +a subclass of Bucket or S3Object and telling it what bucket to use: + + class JukeBoxSong < AWS::S3::S3Object + set_current_bucket_to 'jukebox' + end + +For all methods that take a bucket name as an argument, the current bucket will be used if the bucket name argument is omitted. + + other_song = 'baby-please-come-home.mp3' + JukeBoxSong.store(other_song, open(other_song), :content_type => 'audio/mpeg') + +This time we didn't have to explicitly pass in the bucket name, as the JukeBoxSong class knows that it will +always use the 'jukebox' bucket. + +"Astute readers", as they say, may have noticed that we used the third parameter to pass in the content type, +rather than the fourth parameter as we had the last time we created an object. If the bucket can be inferred, or +is explicitly set, as we've done in the JukeBoxSong class, then the third argument can be used to pass in +options.
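
To make that calling convention concrete, here is a minimal sketch (the file name and the :access value below are hypothetical examples, not part of the walkthrough above): once the bucket is implied, any option, such as a canned access level, moves up to the third argument.

  # Bucket comes from the current bucket ('jukebox'), so options are the third argument
  JukeBoxSong.store('funky-kingston.mp3',
                    open('funky-kingston.mp3'),
                    :access => :public_read)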
+ +Now all operations that would have required a bucket name no longer do. + + other_song = JukeBoxSong.find('baby-please-come-home.mp3') + + +== BitTorrent +==== Another way to download large files + +Objects on S3 can be distributed via the BitTorrent file sharing protocol. + +You can get a torrent file for an object by calling torrent_for: + + S3Object.torrent_for 'kiss.jpg', 'marcel' + +Or just call the torrent method if you already have the object: + + song = S3Object.find 'kiss.jpg', 'marcel' + song.torrent + +Calling grant_torrent_access_to on a object will allow anyone to anonymously +fetch the torrent file for that object: + + S3Object.grant_torrent_access_to 'kiss.jpg', 'marcel' + +Anonymous requests to + + http://s3.amazonaws.com/marcel/kiss.jpg?torrent + +will serve up the torrent file for that object. + + +== Access control +==== Using canned access control policies + +By default buckets are private. This means that only the owner has access rights to the bucket and its objects. +Objects in that bucket inherit the permission of the bucket unless otherwise specified. When an object is private, the owner can +generate a signed url that exposes the object to anyone who has that url. Alternatively, buckets and objects can be given other +access levels. Several canned access levels are defined: + +* :private - Owner gets FULL_CONTROL. No one else has any access rights. This is the default. +* :public_read - Owner gets FULL_CONTROL and the anonymous principal is granted READ access. If this policy is used on an object, it can be read from a browser with no authentication. +* :public_read_write - Owner gets FULL_CONTROL, the anonymous principal is granted READ and WRITE access. This is a useful policy to apply to a bucket, if you intend for any anonymous user to PUT objects into the bucket. +* :authenticated_read - Owner gets FULL_CONTROL, and any principal authenticated as a registered Amazon S3 user is granted READ access. + +You can set a canned access level when you create a bucket or an object by using the :access option: + + S3Object.store( + 'kiss.jpg', + data, + 'marcel', + :access => :public_read + ) + +Since the image we created is publicly readable, we can access it directly from a browser by going to the corresponding bucket name +and specifying the object's key without a special authenticated url: + + http://s3.amazonaws.com/marcel/kiss.jpg + +==== Building custum access policies + +For both buckets and objects, you can use the acl method to see its access control policy: + + policy = S3Object.acl('kiss.jpg', 'marcel') + pp policy.grants + [#, + #] + +Policies are made up of one or more grants which grant a specific permission to some grantee. Here we see the default FULL_CONTROL grant +to the owner of this object. There is also READ permission granted to the Allusers Group, which means anyone has read access for the object. + +Say we wanted to grant access to anyone to read the access policy of this object. The current READ permission only grants them permission to read +the object itself (for example, from a browser) but it does not allow them to read the access policy. For that we will need to grant the AllUsers group the READ_ACP permission. + +First we'll create a new grant object: + + grant = ACL::Grant.new + # => # + grant.permission = 'READ_ACP' + +Now we need to indicate who this grant is for. 
In other words, who the grantee is: + + grantee = ACL::Grantee.new + # => # + +There are three ways to specify a grantee: 1) by their internal Amazon id, such as the one returned with an object's Owner, +2) by their Amazon account email address or 3) by specifying a group. As of this writing you can not create custom groups, but +Amazon does provide three already: AllUsers, Authenticated and LogDelivery. In this case we want to provide the grant to all users. +This effectively means "anyone". + + grantee.group = 'AllUsers' + +Now that our grantee is set up, we'll associate it with the grant: + + grant.grantee = grantee + grant + # => # + +Our grant has all the information we need. Now that it's ready, we'll add it on to the object's access control policy's list of grants: + + policy.grants << grant + pp policy.grants + [#, + #, + #] + +Now that the policy has the new grant, we reuse the acl method to persist the policy change: + + S3Object.acl('kiss.jpg', 'marcel', policy) + +If we fetch the object's policy again, we see that the grant has been added: + + pp S3Object.acl('kiss.jpg', 'marcel').grants + [#, + #, + #] + +If we were to access this object's acl url from a browser: + + http://s3.amazonaws.com/marcel/kiss.jpg?acl + +we would be shown its access control policy. + +==== Pre-prepared grants + +Alternatively, the ACL::Grant class defines a set of stock grant policies that you can fetch by name. In most cases, you can +just use one of these pre-prepared grants rather than building grants by hand. Two of these stock policies are :public_read +and :public_read_acp, which happen to be the two grants that we built by hand above. In this case we could have simply written: + + policy.grants << ACL::Grant.grant(:public_read) + policy.grants << ACL::Grant.grant(:public_read_acp) + S3Object.acl('kiss.jpg', 'marcel', policy) + +The full details can be found in ACL::Policy, ACL::Grant and ACL::Grantee. + + +==== Accessing private objects from a browser + +All private objects are accessible via an authenticated GET request to the S3 servers. You can generate an +authenticated url for an object like this: + + S3Object.url_for('beluga_baby.jpg', 'marcel_molina') + +By default authenticated urls expire 5 minutes after they were generated. + +Expiration options can be specified either with an absolute time since the epoch with the :expires option, +or with a number of seconds relative to now with the :expires_in option: + + # Absolute expiration date + # (Expires January 18th, 2038) + doomsday = Time.mktime(2038, 1, 18).to_i + S3Object.url_for('beluga_baby.jpg', + 'marcel', + :expires => doomsday) + + # Expiration relative to now specified in seconds + # (Expires in 3 hours) + S3Object.url_for('beluga_baby.jpg', + 'marcel', + :expires_in => 60 * 60 * 3) + +You can specify whether the url should go over SSL with the :use_ssl option: + + # Url will use https protocol + S3Object.url_for('beluga_baby.jpg', + 'marcel', + :use_ssl => true) + +By default, the ssl settings for the current connection will be used. + +If you have an object handy, you can use its url method with the same options: + + song.url(:expires_in => 30) + +To get an unauthenticated url for the object, such as in the case +when the object is publicly readable, pass the +:authenticated option with a value of false.
+ + S3Object.url_for('beluga_baby.jpg', + 'marcel', + :authenticated => false) + # => http://s3.amazonaws.com/marcel/beluga_baby.jpg + + +== Logging +==== Tracking requests made on a bucket + +A bucket can be set to log the requests made on it. By default logging is turned off. You can check if a bucket has logging enabled: + + Bucket.logging_enabled_for? 'jukebox' + # => false + +Enabling it is easy: + + Bucket.enable_logging_for('jukebox') + +Unless you specify otherwise, logs will be written to the bucket you want to log. The logs are just like any other object. By default they will start with the prefix 'log-'. You can customize what bucket you want the logs to be delivered to, as well as customize what the log objects' key is prefixed with, by setting the target_bucket and target_prefix options: + + Bucket.enable_logging_for( + 'jukebox', 'target_bucket' => 'jukebox-logs' + ) + +Now instead of logging right into the jukebox bucket, the logs will go into the bucket called jukebox-logs. + +Once logs have accumulated, you can access them using the logs method: + + pp Bucket.logs('jukebox') + [#, + #, + #] + +Each log has a lines method that gives you information about each request in that log. All the fields are available +as named methods. More information is available in Logging::Log::Line. + + logs = Bucket.logs('jukebox') + log = logs.first + line = log.lines.first + line.operation + # => 'REST.GET.LOGGING_STATUS' + line.request_uri + # => 'GET /jukebox?logging HTTP/1.1' + line.remote_ip + # => "67.165.183.125" + +Disabling logging is just as simple as enabling it: + + Bucket.disable_logging_for('jukebox') + + +== Errors +==== When things go wrong + +Anything you do that makes a request to S3 could result in an error. If it does, the AWS::S3 library will raise an exception +specific to the error. All exceptions that are raised as a result of a request returning an error response inherit from the +ResponseError exception. So should you choose to rescue any such exception, you can simply rescue ResponseError. + +Say you go to delete a bucket, but the bucket turns out not to be empty. This results in a BucketNotEmpty error (one of the many +errors listed at http://docs.amazonwebservices.com/AmazonS3/2006-03-01/ErrorCodeList.html): + + begin + Bucket.delete('jukebox') + rescue ResponseError => error + # ... + end + +Once you've captured the exception, you can extract the error message from S3, as well as the full error response, which includes +things like the HTTP response code: + + error + # => # + error.message + # => "The bucket you tried to delete is not empty" + error.response.code + # => 409 + +You could use this information to redisplay the error in a way you see fit, or just to log the error and continue on. + + +==== Accessing the last request's response + +Sometimes methods that make requests to the S3 servers return some object, like a Bucket or an S3Object. +Other times they return just true. Other times still they raise an exception that you may want to rescue. Despite all these +possible outcomes, every method that makes a request stores its response object for you in Service.response. You can always +get to the last request's response via Service.response. + + objects = Bucket.objects('jukebox') + Service.response.success? + # => true + +This is also useful when an error exception you weren't expecting is raised in the console. You can +root around in the response to get more details of what might have gone wrong.
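
To make that last point concrete, here is a minimal sketch; it assumes the failed bucket deletion shown above was the most recent request, and the accessors on the stored response are illustrative rather than exhaustive.

  begin
    Bucket.delete('jukebox')
  rescue ResponseError
    # The response for the failed DELETE is still available afterwards
    Service.response.success?
    # => false
    Service.response.code
    # => 409
  end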
+ + \ No newline at end of file diff --git a/README.erb b/README.erb new file mode 100644 index 0000000..e92efaf --- /dev/null +++ b/README.erb @@ -0,0 +1,58 @@ += AWS::S3 + +<%= docs_for['AWS::S3'] %> + +== AWS::S3 Basics +=== The service, buckets and objects + +The three main concepts of S3 are the service, buckets and objects. + +==== The service + +<%= docs_for['AWS::S3::Service'] %> + +==== Buckets + +<%= docs_for['AWS::S3::Bucket'] %> + +==== Objects + +<%= docs_for['AWS::S3::S3Object'] %> + +==== Streaming uploads + +<%= docs_for['AWS::S3::S3Object::store'] %> + +== Setting the current bucket +==== Scoping operations to a specific bucket + +<%= docs_for['AWS::S3::Base.set_current_bucket_to'] %> + +== BitTorrent +==== Another way to download large files + +<%= docs_for['AWS::S3::BitTorrent'] %> + +== Access control +==== Using canned access control policies + +<%= docs_for['AWS::S3::ACL'] %> + +==== Accessing private objects from a browser + +<%= docs_for['AWS::S3::S3Object.url_for'] %> + +== Logging +==== Tracking requests made on a bucket + +<%= docs_for['AWS::S3::Logging'] %> + +== Errors +==== When things go wrong + +<%= docs_for['AWS::S3::Error'] %> + +==== Accessing the last request's response + +<%= docs_for['AWS::S3::Service.response'] %> + \ No newline at end of file diff --git a/Rakefile b/Rakefile new file mode 100644 index 0000000..9aaf743 --- /dev/null +++ b/Rakefile @@ -0,0 +1,328 @@ +require 'rubygems' +require 'rake' +require 'rake/testtask' +require 'rake/rdoctask' +require 'rake/packagetask' +require 'rake/gempackagetask' + +require File.dirname(__FILE__) + '/lib/aws/s3' + +def library_root + File.dirname(__FILE__) +end + +task :default => :test + +Rake::TestTask.new do |test| + test.pattern = 'test/*_test.rb' + test.verbose = true +end + +namespace :doc do + Rake::RDocTask.new do |rdoc| + rdoc.rdoc_dir = 'doc' + rdoc.title = "AWS::S3 -- Support for Amazon S3's REST api" + rdoc.options << '--line-numbers' << '--inline-source' + rdoc.rdoc_files.include('README') + rdoc.rdoc_files.include('COPYING') + rdoc.rdoc_files.include('INSTALL') + rdoc.rdoc_files.include('lib/**/*.rb') + end + + task :rdoc => 'doc:readme' + + task :refresh => :rerdoc do + system 'open doc/index.html' + end + + task :readme do + require 'support/rdoc/code_info' + RDoc::CodeInfo.parse('lib/**/*.rb') + + strip_comments = lambda {|comment| comment.gsub(/^# ?/, '')} + docs_for = lambda do |location| + info = RDoc::CodeInfo.for(location) + raise RuntimeError, "Couldn't find documentation for `#{location}'" unless info + strip_comments[info.comment] + end + + open('README', 'w') do |file| + file.write ERB.new(IO.read('README.erb')).result(binding) + end + end + + task :deploy => :rerdoc do + sh %(scp -r doc marcel@rubyforge.org:/var/www/gforge-projects/amazon/) + end +end + +namespace :dist do + spec = Gem::Specification.new do |s| + s.name = 'aws-s3' + s.version = Gem::Version.new(AWS::S3::Version) + s.summary = "Client library for Amazon's Simple Storage Service's REST API" + s.description = s.summary + s.email = 'marcel@vernix.org' + s.author = 'Marcel Molina Jr.' 
+ s.has_rdoc = true + s.extra_rdoc_files = %w(README COPYING INSTALL) + s.homepage = 'http://amazon.rubyforge.org' + s.rubyforge_project = 'amazon' + s.files = FileList['Rakefile', 'lib/**/*.rb', 'bin/*', 'support/**/*.rb'] + s.executables << 's3sh' + s.test_files = Dir['test/**/*'] + + s.add_dependency 'xml-simple' + s.add_dependency 'builder' + s.add_dependency 'mime-types' + s.rdoc_options = ['--title', "AWS::S3 -- Support for Amazon S3's REST api", + '--main', 'README', + '--line-numbers', '--inline-source'] + end + + # Regenerate README before packaging + task :package => 'doc:readme' + Rake::GemPackageTask.new(spec) do |pkg| + pkg.need_tar_gz = true + pkg.package_files.include('{lib,script,test,support}/**/*') + pkg.package_files.include('README') + pkg.package_files.include('COPYING') + pkg.package_files.include('INSTALL') + pkg.package_files.include('Rakefile') + end + + desc 'Install with gems' + task :install => :repackage do + sh "sudo gem i pkg/#{spec.name}-#{spec.version}.gem" + end + + desc 'Uninstall gem' + task :uninstall do + sh "sudo gem uninstall #{spec.name} -x" + end + + desc 'Reinstall gem' + task :reinstall => [:uninstall, :install] + + task :confirm_release do + print "Releasing version #{spec.version}. Are you sure you want to proceed? [Yn] " + abort if STDIN.getc == ?n + end + + desc 'Tag release' + task :tag do + svn_root = 'svn+ssh://marcel@rubyforge.org/var/svn/amazon/s3' + sh %(svn cp #{svn_root}/trunk #{svn_root}/tags/rel-#{spec.version} -m "Tag #{spec.name} release #{spec.version}") + end + + desc 'Update changelog to include a release marker' + task :add_release_marker_to_changelog do + changelog = IO.read('CHANGELOG') + changelog.sub!(/^trunk:/, "#{spec.version}:") + + open('CHANGELOG', 'w') do |file| + file.write "trunk:\n\n#{changelog}" + end + end + + task :commit_changelog do + sh %(svn ci CHANGELOG -m "Bump changelog version marker for release") + end + + package_name = lambda {|specification| File.join('pkg', "#{specification.name}-#{specification.version}")} + + desc 'Push a release to rubyforge' + task :release => [:confirm_release, :clean, :add_release_marker_to_changelog, :package, :commit_changelog, :tag] do + require 'rubyforge' + package = package_name[spec] + + rubyforge = RubyForge.new + rubyforge.login + + version_already_released = lambda do + releases = rubyforge.autoconfig['release_ids'] + releases.has_key?(spec.name) && releases[spec.name][spec.version] + end + + abort("Release #{spec.version} already exists!") if version_already_released.call + + if release_id = rubyforge.add_release(spec.rubyforge_project, spec.name, spec.version, "#{package}.tar.gz") + rubyforge.add_file(spec.rubyforge_project, spec.name, release_id, "#{package}.gem") + else + puts 'Release failed!' + end + end + + desc 'Upload a beta gem' + task :push_beta_gem => [:clobber_package, :package] do + beta_gem = package_name[spec] + sh %(scp #{beta_gem}.gem marcel@rubyforge.org:/var/www/gforge-projects/amazon/beta) + end + + task :spec do + puts spec.to_ruby + end +end + +desc 'Check code to test ratio' +task :stats do + library_files = FileList["#{library_root}/lib/**/*.rb"] + test_files = FileList["#{library_root}/test/**/*_test.rb"] + count_code_lines = Proc.new do |lines| + lines.inject(0) do |code_lines, line| + next code_lines if [/^\s*$/, /^\s*#/].any? 
{|non_code_line| non_code_line === line} + code_lines + 1 + end + end + + count_code_lines_for_files = Proc.new do |files| + files.inject(0) {|code_lines, file| code_lines + count_code_lines[IO.read(file)]} + end + + library_code_lines = count_code_lines_for_files[library_files] + test_code_lines = count_code_lines_for_files[test_files] + ratio = Proc.new { sprintf('%.2f', test_code_lines.to_f / library_code_lines)} + + puts "Code LOC: #{library_code_lines} Test LOC: #{test_code_lines} Code to Test Ratio: 1:#{ratio.call}" +end + +namespace :test do + find_file = lambda do |name| + file_name = lambda {|path| File.join(path, "#{name}.rb")} + root = $:.detect do |path| + File.exist?(file_name[path]) + end + file_name[root] if root + end + + TEST_LOADER = find_file['rake/rake_test_loader'] + multiruby = lambda do |glob| + system 'multiruby', TEST_LOADER, *Dir.glob(glob) + end + + desc 'Check test coverage' + task :coverage do + system("rcov --sort coverage #{File.join(library_root, 'test/*_test.rb')}") + show_test_coverage_results + end + + Rake::TestTask.new(:remote) do |test| + test.pattern = 'test/remote/*_test.rb' + test.verbose = true + end + + Rake::TestTask.new(:all) do |test| + test.pattern = 'test/**/*_test.rb' + test.verbose = true + end + + desc 'Check test coverage of full stack remote tests' + task :full_coverage do + system("rcov --sort coverage #{File.join(library_root, 'test/remote/*_test.rb')} #{File.join(library_root, 'test/*_test.rb')}") + show_test_coverage_results + end + + desc 'Run local tests against multiple versions of Ruby' + task :version_audit do + multiruby['test/*_test.rb'] + end + + namespace :version_audit do + desc 'Run remote tests against multiple versions of Ruby' + task :remote do + multiruby['test/remote/*_test.rb'] + end + + desc 'Run all tests against multiple versions of Ruby' + task :all do + multiruby['test/**/*_test.rb'] + end + end + + def show_test_coverage_results + system("open #{File.join(library_root, 'coverage/index.html')}") if PLATFORM['darwin'] + end + + desc 'Remove coverage products' + task :clobber_coverage do + rm_r 'coverage' rescue nil + end +end + +namespace :todo do + class << TODOS = IO.read(File.join(library_root, 'TODO')) + def items + split("\n").grep(/^\[\s|X\]/) + end + + def completed + find_items_matching(/^\[X\]/) + end + + def uncompleted + find_items_matching(/^\[\s\]/) + end + + def find_items_matching(regexp) + items.grep(regexp).instance_eval do + def display + puts map {|item| "* #{item.sub(/^\[[^\]]\]\s/, '')}"} + end + self + end + end + end + + desc 'Completed todo items' + task :completed do + TODOS.completed.display + end + + desc 'Incomplete todo items' + task :uncompleted do + TODOS.uncompleted.display + end +end if File.exists?(File.join(library_root, 'TODO')) + +namespace :site do + require 'erb' + require 'rdoc/markup/simple_markup' + require 'rdoc/markup/simple_markup/to_html' + + readme = lambda { IO.read('README')[/^== Getting started\n(.*)/m, 1] } + + readme_to_html = lambda do + handler = SM::ToHtml.new + handler.instance_eval do + require 'syntax' + require 'syntax/convertors/html' + def accept_verbatim(am, fragment) + syntax = Syntax::Convertors::HTML.for_syntax('ruby') + @res << %(
#{syntax.convert(fragment.txt, true)}
) + end + end + SM::SimpleMarkup.new.convert(readme.call, handler) + end + + desc 'Regenerate the public website page' + task :build => 'doc:readme' do + open('site/public/index.html', 'w') do |file| + erb_data = {} + erb_data[:readme] = readme_to_html.call + file.write ERB.new(IO.read('site/index.erb')).result(binding) + end + end + + task :refresh => :build do + system 'open site/public/index.html' + end + + desc 'Update the live website' + task :deploy => :build do + site_files = FileList['site/public/*'] + site_files.delete_if {|file| File.directory?(file)} + sh %(scp #{site_files.join ' '} marcel@rubyforge.org:/var/www/gforge-projects/amazon/) + end +end + +task :clean => ['dist:clobber_package', 'doc:clobber_rdoc', 'test:clobber_coverage'] diff --git a/TODO b/TODO new file mode 100644 index 0000000..32099b5 --- /dev/null +++ b/TODO @@ -0,0 +1,26 @@ +0.3.0 + + [ ] Alias make alias for establish_connection! that is non-bang + + [ ] Pass filter criteria like :max_keys onto methods like logs_for and logs which return logs. + [ ] Add high level support to custom logging information as documented in the "Adding Custom Information..." here http://docs.amazonwebservices.com/AmazonS3/2006-03-01/LogFormat.html + +[ ] Bucket.delete(:force => true) needs to fetch all objects in the bucket until there are no more, taking into account the max-keys limit of 1000 objects at a time and it needs to do so in a very efficient manner so it can handle very large buckets (using :prefix and :marker) +[ ] Ability to set content_type on S3Object that has not been stored yet +[ ] Allow symbol and abbreviated version of logging options ('target_prefix' => :prefix, 'target_bucket' => :bucket) +[ ] Allow symbol options for grant's constructor ('permission' => :permission) +[ ] Reconsider save method to Policies returned by Bucket and S3Object's acl instance method so you can do some_object.acl.save after modifying it rather than some_object.acl(some_object.acl) + +[X] S3Object.copy and S3Object.move should preserve the acl +[X] Consider opening up Net::HTTPGenericRequest to replace hardcoded chunk_size to something greater than 1k (maybe 500k since the files are presumed to be quite large) +[X] Add S3Object.exists? +[X] See about replacing XmlSimple with libxml if it's installed since XmlSimple can be rather slow (due to wrapping REXML) +[X] Ability to build up the README from internal docs so documentation for various classes and the README can feed from a single source +[X] Bittorrent documentation +[X] Document logging methods +[X] Bittorrent +[X] ACL documentation +[X] Log management ([de]activation & retrieval) +[X] Remote ACL tests +[X] ACL requesting and parsing +[X] ACL updating for already stored objects which merges with existing ACL diff --git a/bin/s3sh b/bin/s3sh new file mode 100644 index 0000000..c4d546e --- /dev/null +++ b/bin/s3sh @@ -0,0 +1,6 @@ +#!/usr/bin/env ruby +s3_lib = File.dirname(__FILE__) + '/../lib/aws/s3' +setup = File.dirname(__FILE__) + '/setup' +irb_name = RUBY_PLATFORM =~ /mswin32/ ? 
'irb.bat' : 'irb' + +exec "#{irb_name} -r #{s3_lib} -r #{setup} --simple-prompt" \ No newline at end of file diff --git a/bin/setup.rb b/bin/setup.rb new file mode 100644 index 0000000..7759c2c --- /dev/null +++ b/bin/setup.rb @@ -0,0 +1,10 @@ +#!/usr/bin/env ruby +if ENV['AMAZON_ACCESS_KEY_ID'] && ENV['AMAZON_SECRET_ACCESS_KEY'] + AWS::S3::Base.establish_connection!( + :access_key_id => ENV['AMAZON_ACCESS_KEY_ID'], + :secret_access_key => ENV['AMAZON_SECRET_ACCESS_KEY'] + ) +end + +require File.dirname(__FILE__) + '/../test/fixtures' +include AWS::S3 \ No newline at end of file diff --git a/lib/aws/s3.rb b/lib/aws/s3.rb new file mode 100644 index 0000000..fc7d3f0 --- /dev/null +++ b/lib/aws/s3.rb @@ -0,0 +1,61 @@ +require 'base64' +require 'cgi' +require 'uri' +require 'openssl' +require 'digest/sha1' +require 'net/https' +require 'time' +require 'date' +require 'open-uri' + +$:.unshift(File.dirname(__FILE__)) +require 's3/extensions' +require_library_or_gem 'builder' unless defined? Builder +require_library_or_gem 'mime/types' unless defined? MIME::Types + +require 's3/base' +require 's3/version' +require 's3/parsing' +require 's3/acl' +require 's3/logging' +require 's3/bittorrent' +require 's3/service' +require 's3/owner' +require 's3/bucket' +require 's3/object' +require 's3/error' +require 's3/exceptions' +require 's3/connection' +require 's3/authentication' +require 's3/response' + +AWS::S3::Base.class_eval do + include AWS::S3::Connection::Management +end + +AWS::S3::Bucket.class_eval do + include AWS::S3::Logging::Management + include AWS::S3::ACL::Bucket +end + +AWS::S3::S3Object.class_eval do + include AWS::S3::ACL::S3Object + include AWS::S3::BitTorrent +end + +require_library_or_gem 'xmlsimple' unless defined? XmlSimple +# If libxml is installed, we use the FasterXmlSimple library, that provides most of the functionality of XmlSimple +# except it uses the xml/libxml library for xml parsing (rather than REXML). If libxml isn't installed, we just fall back on +# XmlSimple. +AWS::S3::Parsing.parser = + begin + require_library_or_gem 'xml/libxml' + # Older version of libxml aren't stable (bus error when requesting attributes that don't exist) so we + # have to use a version greater than '0.3.8.2'. + raise LoadError unless XML::Parser::VERSION > '0.3.8.2' + $:.push(File.join(File.dirname(__FILE__), '..', '..', 'support', 'faster-xml-simple', 'lib')) + require_library_or_gem 'faster_xml_simple' + FasterXmlSimple + rescue LoadError + XmlSimple + end diff --git a/lib/aws/s3/acl.rb b/lib/aws/s3/acl.rb new file mode 100644 index 0000000..9452e77 --- /dev/null +++ b/lib/aws/s3/acl.rb @@ -0,0 +1,636 @@ +module AWS + module S3 + # By default buckets are private. This means that only the owner has access rights to the bucket and its objects. + # Objects in that bucket inherit the permission of the bucket unless otherwise specified. When an object is private, the owner can + # generate a signed url that exposes the object to anyone who has that url. Alternatively, buckets and objects can be given other + # access levels. Several canned access levels are defined: + # + # * :private - Owner gets FULL_CONTROL. No one else has any access rights. This is the default. + # * :public_read - Owner gets FULL_CONTROL and the anonymous principal is granted READ access. If this policy is used on an object, it can be read from a browser with no authentication. + # * :public_read_write - Owner gets FULL_CONTROL, the anonymous principal is granted READ and WRITE access. 
This is a useful policy to apply to a bucket, if you intend for any anonymous user to PUT objects into the bucket. + # * :authenticated_read - Owner gets FULL_CONTROL, and any principal authenticated as a registered Amazon S3 user is granted READ access. + # + # You can set a canned access level when you create a bucket or an object by using the :access option: + # + # S3Object.store( + # 'kiss.jpg', + # data, + # 'marcel', + # :access => :public_read + # ) + # + # Since the image we created is publicly readable, we can access it directly from a browser by going to the corresponding bucket name + # and specifying the object's key without a special authenticated url: + # + # http://s3.amazonaws.com/marcel/kiss.jpg + # + # ==== Building custum access policies + # + # For both buckets and objects, you can use the acl method to see its access control policy: + # + # policy = S3Object.acl('kiss.jpg', 'marcel') + # pp policy.grants + # [#, + # #] + # + # Policies are made up of one or more grants which grant a specific permission to some grantee. Here we see the default FULL_CONTROL grant + # to the owner of this object. There is also READ permission granted to the Allusers Group, which means anyone has read access for the object. + # + # Say we wanted to grant access to anyone to read the access policy of this object. The current READ permission only grants them permission to read + # the object itself (for example, from a browser) but it does not allow them to read the access policy. For that we will need to grant the AllUsers group the READ_ACP permission. + # + # First we'll create a new grant object: + # + # grant = ACL::Grant.new + # # => # + # grant.permission = 'READ_ACP' + # + # Now we need to indicate who this grant is for. In other words, who the grantee is: + # + # grantee = ACL::Grantee.new + # # => # + # + # There are three ways to specify a grantee: 1) by their internal amazon id, such as the one returned with an object's Owner, + # 2) by their Amazon account email address or 3) by specifying a group. As of this writing you can not create custom groups, but + # Amazon does provide three already: AllUsers, Authenticated and LogDelivery. In this case we want to provide the grant to all users. + # This effectively means "anyone". + # + # grantee.group = 'AllUsers' + # + # Now that our grantee is setup, we'll associate it with the grant: + # + # grant.grantee = grantee + # grant + # # => # + # + # Are grant has all the information we need. Now that it's ready, we'll add it on to the object's access control policy's list of grants: + # + # policy.grants << grant + # pp policy.grants + # [#, + # #, + # #] + # + # Now that the policy has the new grant, we reuse the acl method to persist the policy change: + # + # S3Object.acl('kiss.jpg', 'marcel', policy) + # + # If we fetch the object's policy again, we see that the grant has been added: + # + # pp S3Object.acl('kiss.jpg', 'marcel').grants + # [#, + # #, + # #] + # + # If we were to access this object's acl url from a browser: + # + # http://s3.amazonaws.com/marcel/kiss.jpg?acl + # + # we would be shown its access control policy. + # + # ==== Pre-prepared grants + # + # Alternatively, the ACL::Grant class defines a set of stock grant policies that you can fetch by name. In most cases, you can + # just use one of these pre-prepared grants rather than building grants by hand. Two of these stock policies are :public_read + # and :public_read_acp, which happen to be the two grants that we built by hand above. 
In this case we could have simply written: + # + # policy.grants << ACL::Grant.grant(:public_read) + # policy.grants << ACL::Grant.grant(:public_read_acp) + # S3Object.acl('kiss.jpg', 'marcel', policy) + # + # The full details can be found in ACL::Policy, ACL::Grant and ACL::Grantee. + module ACL + # The ACL::Policy class lets you inspect and modify access controls for buckets and objects. + # A policy is made up of one or more Grants which specify a permission and a Grantee to whom that permission is granted. + # + # Buckets and objects are given a default access policy which contains one grant permitting the owner of the bucket or object + # FULL_CONTROL over its contents. This means they can read the object, write to the object, as well as read and write its + # policy. + # + # The acl method for both buckets and objects returns the policy object for that entity: + # + # policy = Bucket.acl('some-bucket') + # + # The grants method of a policy exposes its grants. You can treat this collection as an array and push new grants onto it: + # + # policy.grants << grant + # + # Check the documentation for Grant and Grantee for more details on how to create new grants. + class Policy + include SelectiveAttributeProxy #:nodoc: + attr_accessor :owner, :grants + + def initialize(attributes = {}) + @attributes = attributes + @grants = [].extend(GrantListExtensions) + extract_owner! if owner? + extract_grants! if grants? + end + + # The xml representation of the policy. + def to_xml + Builder.new(owner, grants).to_s + end + + private + + def owner? + attributes.has_key?('owner') || !owner.nil? + end + + def grants? + (attributes.has_key?('access_control_list') && attributes['access_control_list']['grant']) || !grants.empty? + end + + def extract_owner! + @owner = Owner.new(attributes.delete('owner')) + end + + def extract_grants! + attributes['access_control_list']['grant'].each do |grant| + grants << Grant.new(grant) + end + end + + module GrantListExtensions #:nodoc: + def include?(grant) + case grant + when Symbol + super(ACL::Grant.grant(grant)) + else + super + end + end + + def delete(grant) + case grant + when Symbol + super(ACL::Grant.grant(grant)) + else + super + end + end + + # Two grant lists are equal if they have identical grants both in terms of permission and grantee. + def ==(grants) + size == grants.size && all? {|grant| grants.include?(grant)} + end + end + + class Builder < XmlGenerator #:nodoc: + attr_reader :owner, :grants + def initialize(owner, grants) + @owner = owner + @grants = grants.uniq # There could be some duplicate grants + super() + end + + def build + xml.tag!('AccessControlPolicy', 'xmlns' => 'http://s3.amazonaws.com/doc/2006-03-01/') do + xml.Owner do + xml.ID owner.id + xml.DisplayName owner.display_name + end + + xml.AccessControlList do + xml << grants.map {|grant| grant.to_xml}.join("\n") + end + end + end + end + end + + # A Policy is made up of one or more Grant objects. A grant sets a specific permission and grants it to the associated grantee. + # + # When creating a new grant to add to a policy, you need only set its permission and then associate with a Grantee. + # + # grant = ACL::Grant.new + # => # + # + # Here we see that neither the permission nor the grantee have been set. Let's make this grant provide the READ permission. + # + # grant.permission = 'READ' + # grant + # => # + # + # Now let's assume we have a grantee to the AllUsers group already set up. Just associate that grantee with our grant. 
+ # + # grant.grantee = all_users_group_grantee + # grant + # => # + # + # And now are grant is complete. It provides READ permission to the AllUsers group, effectively making this object publicly readable + # without any authorization. + # + # Assuming we have some object's policy available in a local variable called policy, we can now add this grant onto its + # collection of grants. + # + # policy.grants << grant + # + # And then we send the updated policy to the S3 servers. + # + # some_s3object.acl(policy) + class Grant + include SelectiveAttributeProxy #:nodoc: + constant :VALID_PERMISSIONS, %w(READ WRITE READ_ACP WRITE_ACP FULL_CONTROL) + attr_accessor :grantee + + class << self + # Returns stock grants with name type. + # + # public_read_grant = ACL::Grant.grant :public_read + # => # + # + # Valid stock grant types are: + # + # * :authenticated_read + # * :authenticated_read_acp + # * :authenticated_write + # * :authenticated_write_acp + # * :logging_read + # * :logging_read_acp + # * :logging_write + # * :logging_write_acp + # * :public_read + # * :public_read_acp + # * :public_write + # * :public_write_acp + def grant(type) + case type + when *stock_grant_map.keys + build_stock_grant_for type + else + raise ArgumentError, "Unknown grant type `#{type}'" + end + end + + private + def stock_grant_map + grant = lambda {|permission, group| {:permission => permission, :group => group}} + groups = {:public => 'AllUsers', :authenticated => 'Authenticated', :logging => 'LogDelivery'} + permissions = %w(READ WRITE READ_ACP WRITE_ACP) + stock_grants = {} + groups.each do |grant_group_name, group_name| + permissions.each do |permission| + stock_grants["#{grant_group_name}_#{permission.downcase}".to_sym] = grant[permission, group_name] + end + end + stock_grants + end + memoized :stock_grant_map + + def build_stock_grant_for(type) + stock_grant = stock_grant_map[type] + grant = new do |g| + g.permission = stock_grant[:permission] + end + grant.grantee = Grantee.new do |gr| + gr.group = stock_grant[:group] + end + grant + end + end + + def initialize(attributes = {}) + attributes = {'permission' => nil}.merge(attributes) + @attributes = attributes + extract_grantee! + yield self if block_given? + end + + # Set the permission for this grant. + # + # grant.permission = 'READ' + # grant + # => # + # + # If the specified permisison level is not valid, an InvalidAccessControlLevel exception will be raised. + def permission=(permission_level) + unless self.class.valid_permissions.include?(permission_level) + raise InvalidAccessControlLevel.new(self.class.valid_permissions, permission_level) + end + attributes['permission'] = permission_level + end + + # The xml representation of this grant. + def to_xml + Builder.new(permission, grantee).to_s + end + + def inspect #:nodoc: + "#<%s:0x%s %s>" % [self.class, object_id, self] + end + + def to_s #:nodoc: + [permission || '(permission)', 'to', grantee ? grantee.type_representation : '(grantee)'].join ' ' + end + + def eql?(grant) #:nodoc: + # This won't work for an unposted AmazonCustomerByEmail because of the normalization + # to CanonicalUser but it will work for groups. + to_s == grant.to_s + end + alias_method :==, :eql? + + def hash #:nodoc: + to_s.hash + end + + private + + def extract_grantee! 
+ @grantee = Grantee.new(attributes['grantee']) if attributes['grantee'] + end + + class Builder < XmlGenerator #:nodoc: + attr_reader :grantee, :permission + + def initialize(permission, grantee) + @permission = permission + @grantee = grantee + super() + end + + def build + xml.Grant do + xml << grantee.to_xml + xml.Permission permission + end + end + end + end + + # Grants bestow a access permission to grantees. Each grant of some access control list Policy is associated with a grantee. + # There are three ways of specifying a grantee at the time of this writing. + # + # * By canonical user - This format uses the id of a given Amazon account. The id value for a given account is available in the + # Owner object of a bucket, object or policy. + # + # grantee.id = 'bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1' + # + # Often the id will just be fetched from some owner object. + # + # grantee.id = some_object.owner.id + # + # * By amazon email address - You can specify an email address for any Amazon account. The Amazon account need not be signed up with the S3 service. + # though it must be unique across the entire Amazon system. This email address is normalized into a canonical user representation once the grant + # has been sent back up to the S3 servers. + # + # grantee.email_address = 'joe@example.org' + # + # * By group - As of this writing you can not create custom groups, but Amazon provides three group that you can use. See the documentation for the + # Grantee.group= method for details. + # + # grantee.group = 'Authenticated' + class Grantee + include SelectiveAttributeProxy #:nodoc: + + undef_method :id # Get rid of Object#id + + def initialize(attributes = {}) + # Set default values for attributes that may not be passed in but we still want the object + # to respond to + attributes = {'id' => nil, 'display_name' => nil, 'email_address' => nil, 'uri' => nil}.merge(attributes) + @attributes = attributes + extract_type! + yield self if block_given? + end + + # The xml representation of the current grantee object. + def to_xml + Builder.new(self).to_s + end + + # Returns the type of grantee. Will be one of CanonicalUser, AmazonCustomerByEmail or Group. + def type + return attributes['type'] if attributes['type'] + + # Lookups are in order of preference so if, for example, you set the uri but display_name and id are also + # set, we'd rather go with the canonical representation. + if display_name && id + 'CanonicalUser' + elsif email_address + 'AmazonCustomerByEmail' + elsif uri + 'Group' + end + end + + # Sets the grantee's group by name. + # + # grantee.group = 'AllUsers' + # + # Currently, valid groups defined by S3 are: + # + # * AllUsers: This group represents anyone. In other words, an anonymous request. + # * Authenticated: Any authenticated account on the S3 service. + # * LogDelivery: The entity that delivers bucket access logs. + def group=(group_name) + section = %w(AllUsers Authenticated).include?(group_name) ? 'global' : 's3' + self.uri = "http://acs.amazonaws.com/groups/#{section}/#{group_name}" + end + + # Returns the grantee's group. If the grantee is not a group, nil is returned. 
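+ # For instance, reading the group back after setting it by name: + # + # grantee.group = 'AllUsers' + # grantee.group + # # => 'AllUsers'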
+ def group + return unless uri + uri[%r([^/]+$)] + end + + def type_representation #:nodoc: + case type + when 'CanonicalUser' then display_name || id + when 'AmazonCustomerByEmail' then email_address + when 'Group' then "#{group} Group" + end + end + + def inspect #:nodoc: + "#<%s:0x%s %s>" % [self.class, object_id, type_representation || '(type not set yet)'] + end + + private + def extract_type! + attributes['type'] = attributes.delete('xsi:type') + end + + class Builder < XmlGenerator #:nodoc: + + def initialize(grantee) + @grantee = grantee + super() + end + + def build + xml.tag!('Grantee', attributes) do + representation + end + end + + private + attr_reader :grantee + + def representation + case grantee.type + when 'CanonicalUser' + xml.ID grantee.id + xml.DisplayName grantee.display_name + when 'AmazonCustomerByEmail' + xml.EmailAddress grantee.email_address + when 'Group' + xml.URI grantee.uri + end + end + + def attributes + {'xsi:type' => grantee.type, 'xmlns:xsi' => 'http://www.w3.org/2001/XMLSchema-instance'} + end + end + end + + module Bucket + def self.included(klass) #:nodoc: + klass.extend(ClassMethods) + end + + module ClassMethods + # The acl method is the single point of entry for reading and writing access control list policies for a given bucket. + # + # # Fetch the acl for the 'marcel' bucket + # policy = Bucket.acl 'marcel' + # + # # Modify the policy ... + # # ... + # + # # Send updated policy back to the S3 servers + # Bucket.acl 'marcel', policy + def acl(name = nil, policy = nil) + if name.is_a?(ACL::Policy) + policy = name + name = nil + end + + path = path(name) << '?acl' + respond_with ACL::Policy::Response do + policy ? put(path, {}, policy.to_xml) : ACL::Policy.new(get(path(name) << '?acl').policy) + end + end + end + + # The acl method returns and updates the acl for a given bucket. + # + # # Fetch a bucket + # bucket = Bucket.find 'marcel' + # + # # Add a grant to the bucket's policy + # bucket.acl.grants << some_grant + # + # # Write the changes to the policy + # bucket.acl(bucket.acl) + def acl(reload = false) + policy = reload.is_a?(ACL::Policy) ? reload : nil + memoize(reload) do + self.class.acl(name, policy) if policy + self.class.acl(name) + end + end + end + + module S3Object + def self.included(klass) #:nodoc: + klass.extend(ClassMethods) + end + + module ClassMethods + # The acl method is the single point of entry for reading and writing access control list policies for a given object. + # + # # Fetch the acl for the 'kiss.jpg' object in the 'marcel' bucket + # policy = S3Object.acl 'kiss.jpg', 'marcel' + # + # # Modify the policy ... + # # ... + # + # # Send updated policy back to the S3 servers + # S3Object.acl 'kiss.jpg', 'marcel', policy + def acl(name, bucket = nil, policy = nil) + # We're using the second argument as the ACL::Policy + if bucket.is_a?(ACL::Policy) + policy = bucket + bucket = nil + end + + bucket = bucket_name(bucket) + path = path!(bucket, name) << '?acl' + + respond_with ACL::Policy::Response do + policy ? put(path, {}, policy.to_xml) : ACL::Policy.new(get(path).policy) + end + end + end + + # The acl method returns and updates the acl for a given s3 object. + # + # # Fetch a the object + # object = S3Object.find 'kiss.jpg', 'marcel' + # + # # Add a grant to the object's + # object.acl.grants << some_grant + # + # # Write the changes to the policy + # object.acl(object.acl) + def acl(reload = false) + policy = reload.is_a?(ACL::Policy) ? 
reload : nil + memoize(reload) do + self.class.acl(key, bucket.name, policy) if policy + self.class.acl(key, bucket.name) + end + end + end + + class OptionProcessor #:nodoc: + attr_reader :options + class << self + def process!(options) + new(options).process! + end + end + + def initialize(options) + options.to_normalized_options! + @options = options + @access_level = extract_access_level + end + + def process! + return unless access_level_specified? + validate! + options['x-amz-acl'] = access_level + end + + private + def extract_access_level + options.delete('access') || options.delete('x-amz-acl') + end + + def validate! + raise InvalidAccessControlLevel.new(valid_levels, access_level) unless valid? + end + + def valid? + valid_levels.include?(access_level) + end + + def access_level_specified? + !@access_level.nil? + end + + def valid_levels + %w(private public-read public-read-write authenticated-read) + end + + def access_level + @normalized_access_level ||= @access_level.to_header + end + end + end + end +end diff --git a/lib/aws/s3/authentication.rb b/lib/aws/s3/authentication.rb new file mode 100644 index 0000000..8c16900 --- /dev/null +++ b/lib/aws/s3/authentication.rb @@ -0,0 +1,218 @@ +module AWS + module S3 + # All authentication is taken care of for you by the AWS::S3 library. None the less, some details of the two types + # of authentication and when they are used may be of interest to some. + # + # === Header based authentication + # + # Header based authentication is achieved by setting a special Authorization header whose value + # is formatted like so: + # + # "AWS #{access_key_id}:#{encoded_canonical}" + # + # The access_key_id is the public key that is assigned by Amazon for a given account which you use when + # establishing your initial connection. The encoded_canonical is computed according to rules layed out + # by Amazon which we will describe presently. + # + # ==== Generating the encoded canonical string + # + # The "canonical string", generated by the CanonicalString class, is computed by collecting the current request method, + # a set of significant headers of the current request, and the current request path into a string. + # That canonical string is then encrypted with the secret_access_key assigned by Amazon. The resulting encrypted canonical + # string is then base 64 encoded. + # + # === Query string based authentication + # + # When accessing a restricted object from the browser, you can authenticate via the query string, by setting the following parameters: + # + # "AWSAccessKeyId=#{access_key_id}&Expires=#{expires}&Signature=#{encoded_canonical}" + # + # The QueryString class is responsible for generating the appropriate parameters for authentication via the + # query string. + # + # The access_key_id and encoded_canonical are the same as described in the Header based authentication section. + # The expires value dictates for how long the current url is valid (by default, it will expire in 5 minutes). Expiration can be specified + # either by an absolute time (expressed in seconds since the epoch), or in relative time (in number of seconds from now). + # Details of how to customize the expiration of the url are provided in the documentation for the QueryString class. + # + # All requests made by this library use header authentication. When a query string authenticated url is needed, + # the S3Object#url method will include the appropriate query string parameters. 
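+ # + # As a rough illustration (the request object, key values and expiry below are hypothetical), the query string parameters can be generated with the QueryString class: + # + # request = Net::HTTP::Get.new('/marcel/kiss.jpg') + # Authentication::QueryString.new(request, 'access_key_id', 'secret_access_key', :expires_in => 60) + # # => "AWSAccessKeyId=access_key_id&Expires=...&Signature=..."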
+ # + # === Full authentication specification + # + # The full specification of the authentication protocol can be found at + # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAuthentication.html + class Authentication + constant :AMAZON_HEADER_PREFIX, 'x-amz-' + + # Signature is the abstract super class for the Header and QueryString authentication methods. It does the job + # of computing the canonical_string using the CanonicalString class as well as encoding the canonical string. The subclasses + # parameterize these computations and arrange them in a string form appropriate to how they are used, in one case an http request + # header value, and in the other case key/value query string parameter pairs. + class Signature < String #:nodoc: + attr_reader :request, :access_key_id, :secret_access_key + + def initialize(request, access_key_id, secret_access_key, options = {}) + super() + @request, @access_key_id, @secret_access_key = request, access_key_id, secret_access_key + @options = options + end + + private + + def canonical_string + options = {} + options[:expires] = expires if expires? + CanonicalString.new(request, options) + end + memoized :canonical_string + + def encoded_canonical + digest = OpenSSL::Digest::Digest.new('sha1') + b64_hmac = Base64.encode64(OpenSSL::HMAC.digest(digest, secret_access_key, canonical_string)).strip + url_encode? ? CGI.escape(b64_hmac) : b64_hmac + end + + def url_encode? + !@options[:url_encode].nil? + end + + def expires? + is_a? QueryString + end + + def date + request['date'].to_s.strip.empty? ? Time.now : Time.parse(request['date']) + end + end + + # Provides header authentication by computing the value of the Authorization header. More details about the + # various authentication schemes can be found in the docs for its containing module, Authentication. + class Header < Signature #:nodoc: + def initialize(*args) + super + self << "AWS #{access_key_id}:#{encoded_canonical}" + end + end + + # Provides query string authentication by computing the three authorization parameters: AWSAccessKeyId, Expires and Signature. + # More details about the various authentication schemes can be found in the docs for its containing module, Authentication. + class QueryString < Signature #:nodoc: + constant :DEFAULT_EXPIRY, 300 # 5 minutes + + def initialize(*args) + super + @options[:url_encode] = true + self << build + end + + private + + # Will return one of three values, in the following order of precedence: + # + # 1) Seconds since the epoch explicitly passed in the +:expires+ option + # 2) The current time in seconds since the epoch plus the number of seconds passed in + # the +:expires_in+ option + # 3) The current time in seconds since the epoch plus the default number of seconds (300 seconds, i.e. 5 minutes) + def expires + return @options[:expires] if @options[:expires] + date.to_i + (@options[:expires_in] || DEFAULT_EXPIRY) + end + + # Keep in alphabetical order + def build + "AWSAccessKeyId=#{access_key_id}&Expires=#{expires}&Signature=#{encoded_canonical}" + end + end + + # The CanonicalString is used to generate an encrypted signature, signed with your secret access key. It is composed of + # data related to the given request for which it provides authentication. This data includes the request method, request headers, + # and the request path. Both Header and QueryString use it to generate their signature.
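+ # + # As a rough sketch (the date and path are hypothetical), the canonical string for a simple GET of an object comes out as: + # + # "GET\n\n\nThu, 05 Jun 2008 21:33:12 GMT\n/marcel/kiss.jpg" + # + # where the two empty lines stand in for the content-md5 and content-type headers, which are blank for a plain GET.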
+ class CanonicalString < String #:nodoc: + class << self + def default_headers + %w(content-type content-md5) + end + + def interesting_headers + ['content-md5', 'content-type', 'date', amazon_header_prefix] + end + + def amazon_header_prefix + /^#{AMAZON_HEADER_PREFIX}/io + end + end + + attr_reader :request, :headers + + def initialize(request, options = {}) + super() + @request = request + @headers = {} + @options = options + # "For non-authenticated or anonymous requests. A NotImplemented error result code will be returned if + # an authenticated (signed) request specifies a Host: header other than 's3.amazonaws.com'" + # (from http://docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html) + request['Host'] = DEFAULT_HOST + build + end + + private + def build + self << "#{request.method}\n" + ensure_date_is_valid + + initialize_headers + set_expiry! + + headers.sort_by {|k, _| k}.each do |key, value| + value = value.to_s.strip + self << (key =~ self.class.amazon_header_prefix ? "#{key}:#{value}" : value) + self << "\n" + end + self << path + end + + def initialize_headers + identify_interesting_headers + set_default_headers + end + + def set_expiry! + self.headers['date'] = @options[:expires] if @options[:expires] + end + + def ensure_date_is_valid + request['Date'] ||= Time.now.httpdate + end + + def identify_interesting_headers + request.each do |key, value| + key = key.downcase # Can't modify frozen string so no bang + if self.class.interesting_headers.any? {|header| header === key} + self.headers[key] = value.to_s.strip + end + end + end + + def set_default_headers + self.class.default_headers.each do |header| + self.headers[header] ||= '' + end + end + + def path + [only_path, extract_significant_parameter].compact.join('?') + end + + def extract_significant_parameter + request.path[/[&?](acl|torrent|logging)(?:&|=|$)/, 1] + end + + def only_path + request.path[/^[^?]*/] + end + end + end + end +end \ No newline at end of file diff --git a/lib/aws/s3/base.rb b/lib/aws/s3/base.rb new file mode 100644 index 0000000..8f7c4a3 --- /dev/null +++ b/lib/aws/s3/base.rb @@ -0,0 +1,232 @@ +module AWS #:nodoc: + # AWS::S3 is a Ruby library for Amazon's Simple Storage Service's REST API (http://aws.amazon.com/s3). + # Full documentation of the currently supported API can be found at http://docs.amazonwebservices.com/AmazonS3/2006-03-01. + # + # == Getting started + # + # To get started you need to require 'aws/s3': + # + # % irb -rubygems + # irb(main):001:0> require 'aws/s3' + # # => true + # + # The AWS::S3 library ships with an interactive shell called s3sh. From within it, you have access to all the operations the library exposes from the command line. + # + # % s3sh + # >> Version + # + # Before you can do anything, you must establish a connection using Base.establish_connection!. A basic connection would look something like this: + # + # AWS::S3::Base.establish_connection!( + # :access_key_id => 'abc', + # :secret_access_key => '123' + # ) + # + # The minimum connection options that you must specify are your access key id and your secret access key. + # + # (If you don't already have your access keys, all you need to sign up for the S3 service is an account at Amazon. You can sign up for S3 and get access keys by visiting http://aws.amazon.com/s3.) + # + # For convenience, if you set two special environment variables with the value of your access keys, the console will automatically create a default connection for you. 
For example: + # + # % cat .amazon_keys + # export AMAZON_ACCESS_KEY_ID='abcdefghijklmnop' + # export AMAZON_SECRET_ACCESS_KEY='1234567891012345' + # + # Then load it in your shell's rc file. + # + # % cat .zshrc + # if [[ -f "$HOME/.amazon_keys" ]]; then + # source "$HOME/.amazon_keys"; + # fi + # + # See more connection details at AWS::S3::Connection::Management::ClassMethods. + module S3 + constant :DEFAULT_HOST, 's3.amazonaws.com' + + # AWS::S3::Base is the abstract super class of all classes who make requests against S3, such as the built in + # Service, Bucket and S3Object classes. It provides methods for making requests, inferring or setting response classes, + # processing request options, and accessing attributes from S3's response data. + # + # Establishing a connection with the Base class is the entry point to using the library: + # + # AWS::S3::Base.establish_connection!(:access_key_id => '...', :secret_access_key => '...') + # + # The :access_key_id and :secret_access_key are the two required connection options. More + # details can be found in the docs for Connection::Management::ClassMethods. + # + # Extensive examples can be found in the README[link:files/README.html]. + class Base + class << self + # Wraps the current connection's request method and picks the appropriate response class to wrap the response in. + # If the response is an error, it will raise that error as an exception. All such exceptions can be caught by rescuing + # their superclass, the ResponseError exception class. + # + # It is unlikely that you would call this method directly. Subclasses of Base have convenience methods for each http request verb + # that wrap calls to request. + def request(verb, path, options = {}, body = nil, attempts = 0, &block) + Service.response = nil + process_options!(options, verb) + response = response_class.new(connection.request(verb, path, options, body, attempts, &block)) + Service.response = response + + Error::Response.new(response.response).error.raise if response.error? + response + # Once in a while, a request to S3 returns an internal error. A glitch in the matrix I presume. Since these + # errors are few and far between the request method will rescue InternalErrors the first three times they encouter them + # and will retry the request again. Most of the time the second attempt will work. + rescue *retry_exceptions + attempts == 3 ? raise : (attempts += 1; retry) + end + + [:get, :post, :put, :delete, :head].each do |verb| + class_eval(<<-EVAL, __FILE__, __LINE__) + def #{verb}(path, headers = {}, body = nil, &block) + request(:#{verb}, path, headers, body, &block) + end + EVAL + end + + # Called when a method which requires a bucket name is called without that bucket name specified. It will try to + # infer the current bucket by looking for it as the subdomain of the current connection's address. If no subdomain + # is found, CurrentBucketNotSpecified will be raised. + # + # MusicBucket.establish_connection! :server => 'jukeboxzero.s3.amazonaws.com' + # MusicBucket.connection.server + # => 'jukeboxzero.s3.amazonaws.com' + # MusicBucket.current_bucket + # => 'jukeboxzero' + # + # Rather than infering the current bucket from the subdomain, the current class' bucket can be explicitly set with + # set_current_bucket_to. 
+ def current_bucket + connection.subdomain or raise CurrentBucketNotSpecified.new(connection.http.address) + end + + # If you plan on always using a specific bucket for certain files, you can skip always having to specify the bucket by creating + # a subclass of Bucket or S3Object and telling it what bucket to use: + # + # class JukeBoxSong < AWS::S3::S3Object + # set_current_bucket_to 'jukebox' + # end + # + # For all methods that take a bucket name as an argument, the current bucket will be used if the bucket name argument is omitted. + # + # other_song = 'baby-please-come-home.mp3' + # JukeBoxSong.store(other_song, open(other_song)) + # + # This time we didn't have to explicitly pass in the bucket name, as the JukeBoxSong class knows that it will + # always use the 'jukebox' bucket. + # + # "Astute readers", as they say, may have noticed that we used the third parameter to pass in the content type, + # rather than the fourth parameter as we had the last time we created an object. If the bucket can be inferred, or + # is explicitly set, as we've done in the JukeBoxSong class, then the third argument can be used to pass in + # options. + # + # Now all operations that would have required a bucket name no longer do. + # + # other_song = JukeBoxSong.find('baby-please-come-home.mp3') + def set_current_bucket_to(name) + raise ArgumentError, "`#{__method__}' must be called on a subclass of #{self.name}" if self == AWS::S3::Base + instance_eval(<<-EVAL) + def current_bucket + '#{name}' + end + EVAL + end + alias_method :current_bucket=, :set_current_bucket_to + + private + + def response_class + FindResponseClass.for(self) + end + + def process_options!(options, verb) + options.replace(RequestOptions.process(options, verb)) + end + + # Using the conventions layed out in the response_class works for more than 80% of the time. + # There are a few edge cases though where we want a given class to wrap its responses in different + # response classes depending on which method is being called. + def respond_with(klass) + eval(<<-EVAL, binding, __FILE__, __LINE__) + def new_response_class + #{klass} + end + + class << self + alias_method :old_response_class, :response_class + alias_method :response_class, :new_response_class + end + EVAL + + yield + ensure + # Restore the original version + eval(<<-EVAL, binding, __FILE__, __LINE__) + class << self + alias_method :response_class, :old_response_class + end + EVAL + end + + def bucket_name(name) + name || current_bucket + end + + def retry_exceptions + [InternalError, RequestTimeout] + end + + class RequestOptions < Hash #:nodoc: + attr_reader :options, :verb + + class << self + def process(*args, &block) + new(*args, &block).process! + end + end + + def initialize(options, verb = :get) + @options = options.to_normalized_options + @verb = verb + super() + end + + def process! + set_access_controls! if verb == :put + replace(options) + end + + private + def set_access_controls! 
+ ACL::OptionProcessor.process!(options) + end + end + end + + def initialize(attributes = {}) #:nodoc: + @attributes = attributes + end + + private + attr_reader :attributes + + def connection + self.class.connection + end + + def http + connection.http + end + + def request(*args, &block) + self.class.request(*args, &block) + end + + def method_missing(method, *args, &block) + attributes[method.to_s] || attributes[method] || super + end + end + end +end diff --git a/lib/aws/s3/bittorrent.rb b/lib/aws/s3/bittorrent.rb new file mode 100644 index 0000000..46bd130 --- /dev/null +++ b/lib/aws/s3/bittorrent.rb @@ -0,0 +1,58 @@ +module AWS + module S3 + # Objects on S3 can be distributed via the BitTorrent file sharing protocol. + # + # You can get a torrent file for an object by calling torrent_for: + # + # S3Object.torrent_for 'kiss.jpg', 'marcel' + # + # Or just call the torrent method if you already have the object: + # + # song = S3Object.find 'kiss.jpg', 'marcel' + # song.torrent + # + # Calling grant_torrent_access_to on an object will allow anyone to anonymously + # fetch the torrent file for that object: + # + # S3Object.grant_torrent_access_to 'kiss.jpg', 'marcel' + # + # Anonymous requests to + # + # http://s3.amazonaws.com/marcel/kiss.jpg?torrent + # + # will serve up the torrent file for that object. + module BitTorrent + def self.included(klass) #:nodoc: + klass.extend ClassMethods + end + + # Adds methods to S3Object for accessing the torrent of a given object. + module ClassMethods + # Returns the torrent file for the object with the given key. + def torrent_for(key, bucket = nil) + get(path!(bucket, key) << '?torrent').body + end + alias_method :torrent, :torrent_for + + # Grants access to the object with the given key to be accessible as a torrent. + def grant_torrent_access_to(key, bucket = nil) + policy = acl(key, bucket) + return true if policy.grants.include?(:public_read) + policy.grants << ACL::Grant.grant(:public_read) + acl(key, bucket, policy) + end + alias_method :grant_torrent_access, :grant_torrent_access_to + end + + # Returns the torrent file for the object. + def torrent + self.class.torrent_for(key, bucket.name) + end + + # Grants torrent access publicly to anyone who requests it on this object. + def grant_torrent_access + self.class.grant_torrent_access_to(key, bucket.name) + end + end + end +end \ No newline at end of file diff --git a/lib/aws/s3/bucket.rb b/lib/aws/s3/bucket.rb new file mode 100644 index 0000000..21c3888 --- /dev/null +++ b/lib/aws/s3/bucket.rb @@ -0,0 +1,320 @@ +module AWS + module S3 + # Buckets are containers for objects (the files you store on S3). To create a new bucket you just specify its name. + # + # # Pick a unique name, or else you'll get an error + # # if the name is already taken. + # Bucket.create('jukebox') + # + # Bucket names must be unique across the entire S3 system, sort of like domain names across the internet. If you try + # to create a bucket with a name that is already taken, you will get an error. + # + # Assuming the name you chose isn't already taken, your new bucket will now appear in the bucket list: + # + # Service.buckets + # # => [#"jukebox"}>] + # + # Once you have successfully created a bucket, you can fetch it by name using Bucket.find. + # + # music_bucket = Bucket.find('jukebox') + # + # The bucket that is returned will contain a listing of all the objects in the bucket.
+ # + # music_bucket.objects.size + # # => 0 + # + # If all you are interested in is the objects of the bucket, you can get to them directly using Bucket.objects. + # + # Bucket.objects('jukebox').size + # # => 0 + # + # By default all objects will be returned, though there are several options you can use to limit what is returned, such as + # specifying that only objects whose name is after a certain place in the alphabet be returned, and etc. Details about these options can + # be found in the documentation for Bucket.find. + # + # To add an object to a bucket you specify the name of the object, its value, and the bucket to put it in. + # + # file = 'black-flowers.mp3' + # S3Object.store(file, open(file), 'jukebox') + # + # You'll see your file has been added to it: + # + # music_bucket.objects + # # => [#] + # + # You can treat your bucket like a hash and access objects by name: + # + # jukebox['black-flowers.mp3'] + # # => # + # + # In the event that you want to delete a bucket, you can use Bucket.delete. + # + # Bucket.delete('jukebox') + # + # Keep in mind, like unix directories, you can not delete a bucket unless it is empty. Trying to delete a bucket + # that contains objects will raise a BucketNotEmpty exception. + # + # Passing the :force => true option to delete will take care of deleting all the bucket's objects for you. + # + # Bucket.delete('photos', :force => true) + # # => true + class Bucket < Base + class << self + # Creates a bucket named name. + # + # Bucket.create('jukebox') + # + # Your bucket name must be unique across all of S3. If the name + # you request has already been taken, you will get a 409 Conflict response, and a BucketAlreadyExists exception + # will be raised. + # + # By default new buckets have their access level set to private. You can override this using the :access option. + # + # Bucket.create('internet_drop_box', :access => :public_read_write) + # + # The full list of access levels that you can set on Bucket and S3Object creation are listed in the README[link:files/README.html] + # in the section called 'Setting access levels'. + def create(name, options = {}) + validate_name!(name) + put("/#{name}", options).success? + end + + # Fetches the bucket named name. + # + # Bucket.find('jukebox') + # + # If a default bucket is inferable from the current connection's subdomain, or if set explicitly with Base.set_current_bucket, + # it will be used if no bucket is specified. + # + # MusicBucket.current_bucket + # => 'jukebox' + # MusicBucket.find.name + # => 'jukebox' + # + # By default all objects contained in the bucket will be returned (sans their data) along with the bucket. + # You can access your objects using the Bucket#objects method. + # + # Bucket.find('jukebox').objects + # + # There are several options which allow you to limit which objects are retrieved. The list of object filtering options + # are listed in the documentation for Bucket.objects. + def find(name = nil, options = {}) + new(get(path(name, options)).bucket) + end + + # Return just the objects in the bucket named name. + # + # By default all objects of the named bucket will be returned. There are options, though, for filtering + # which objects are returned. + # + # === Object filtering options + # + # * :max_keys - The maximum number of keys you'd like to see in the response body. + # The server may return fewer than this many keys, but will not return more. 
+ # + # Bucket.objects('jukebox').size + # # => 3 + # Bucket.objects('jukebox', :max_keys => 1).size + # # => 1 + # + # * :prefix - Restricts the response to only contain results that begin with the specified prefix. + # + # Bucket.objects('jukebox') + # # => [, , ] + # Bucket.objects('jukebox', :prefix => 'classical') + # # => [] + # + # * :marker - Marker specifies where in the result set to resume listing. It restricts the response + # to only contain results that occur alphabetically _after_ the value of marker. To retrieve the next set of results, + # use the last key from the current page of results as the marker in your next request. + # + # # Skip 'mahler' + # Bucket.objects('jukebox', :marker => 'mb') + # # => [] + # + # === Examples + # + # # Return no more than 2 objects whose key's are listed alphabetically after the letter 'm'. + # Bucket.objects('jukebox', :marker => 'm', :max_keys => 2) + # # => [, ] + # + # # Return no more than 2 objects whose key's are listed alphabetically after the letter 'm' and have the 'jazz' prefix. + # Bucket.objects('jukebox', :marker => 'm', :max_keys => 2, :prefix => 'jazz') + # # => [] + def objects(name = nil, options = {}) + find(name, options).object_cache + end + + # Deletes the bucket named name. + # + # All objects in the bucket must be deleted before the bucket can be deleted. If the bucket is not empty, + # BucketNotEmpty will be raised. + # + # You can side step this issue by passing the :force => true option to delete which will take care of + # emptying the bucket before deleting it. + # + # Bucket.delete('photos', :force => true) + # + # Only the owner of a bucket can delete a bucket, regardless of the bucket's access control policy. + def delete(name = nil, options = {}) + name = path(name) + find(name).delete_all if options[:force] + # A bit confusing. Calling super actually makes makes an HTTP DELETE request. The delete method is + # defined in the Base class. It happens to have the same name. + super(name).success? + end + + # List all your buckets. This is a convenient wrapper around AWS::S3::Service.buckets. + def list(reload = false) + Service.buckets(reload) + end + + private + def validate_name!(name) + raise InvalidBucketName.new(name) unless name =~ /^[-\w.]{3,255}$/ + end + + def path(name, options = {}) + if name.is_a?(Hash) + options = name + name = nil + end + "/#{bucket_name(name)}#{RequestOptions.process(options).to_query_string}" + end + end + + attr_reader :object_cache #:nodoc: + + include Enumerable + + def initialize(attributes = {}) #:nodoc: + super + @object_cache = [] + build_contents! + end + + # Fetches the object named object_key, or nil if the bucket does not contain an object with the + # specified key. + # + # bucket.objects + # => [#, + # #] + # bucket['beluga_baby.jpg'] + # => # + def [](object_key) + detect {|file| file.key == object_key.to_s} + end + + # Initializes a new S3Object belonging to the current bucket. + # + # object = bucket.new_object + # object.value = data + # object.key = 'classical/mahler.mp3' + # object.store + # bucket.objects.include?(object) + # => true + def new_object(attributes = {}) + object = S3Object.new(attributes) + register(object) + object + end + + # List S3Object's of the bucket. + # + # Once fetched the objects will be cached. You can reload the objects by passing :reload. + # + # bucket.objects(:reload) + # + # You can also filter the objects using the same options listed in Bucket.objects. 
+ # + # bucket.objects(:prefix => 'jazz') + # + # Using these filtering options will implictly reload the objects. + # + # To reclaim all the objects for the bucket you can pass in :reload again. + def objects(options = {}) + if options.is_a?(Hash) + reload = !options.empty? + else + reload = options + options = {} + end + + reload!(options) if reload || object_cache.empty? + object_cache + end + + # Iterates over the objects in the bucket. + # + # bucket.each do |object| + # # Do something with the object ... + # end + def each(&block) + # Dup the collection since we might be destructively modifying the object_cache during the iteration. + objects.dup.each(&block) + end + + # Returns true if there are no objects in the bucket. + def empty? + objects.empty? + end + + # Returns the number of objects in the bucket. + def size + objects.size + end + + # Deletes the bucket. See its class method counter part Bucket.delete for caveats about bucket deletion and how to ensure + # a bucket is deleted no matter what. + def delete(options = {}) + self.class.delete(name, options) + end + + # Delete all files in the bucket. Use with caution. Can not be undone. + def delete_all + each do |object| + object.delete + end + self + end + alias_method :clear, :delete_all + + # Buckets observe their objects and have this method called when one of their objects + # is either stored or deleted. + def update(action, object) #:nodoc: + case action + when :stored then add object unless objects.include?(object) + when :deleted then object_cache.delete(object) + end + end + + private + def build_contents! + return unless has_contents? + attributes.delete('contents').each do |content| + add new_object(content) + end + end + + def has_contents? + attributes.has_key?('contents') + end + + def add(object) + register(object) + object_cache << object + end + + def register(object) + object.bucket = self + end + + def reload!(options = {}) + object_cache.clear + self.class.objects(name, options).each do |object| + add object + end + end + end + end +end \ No newline at end of file diff --git a/lib/aws/s3/connection.rb b/lib/aws/s3/connection.rb new file mode 100644 index 0000000..44ca323 --- /dev/null +++ b/lib/aws/s3/connection.rb @@ -0,0 +1,314 @@ +module AWS + module S3 + class Connection #:nodoc: + class << self + def connect(options = {}) + new(options) + end + + def prepare_path(path) + path = path.remove_extended unless path.utf8? + URI.escape(path) + end + end + + attr_reader :access_key_id, :secret_access_key, :http, :options + + # Creates a new connection. Connections make the actual requests to S3, though these requests are usually + # called from subclasses of Base. + # + # For details on establishing connections, check the Connection::Management::ClassMethods. + def initialize(options = {}) + @options = Options.new(options) + connect + end + + def request(verb, path, headers = {}, body = nil, attempts = 0, &block) + body.rewind if body.respond_to?(:rewind) unless attempts.zero? + + requester = Proc.new do + path = self.class.prepare_path(path) + request = request_method(verb).new(path, headers) + ensure_content_type!(request) + add_user_agent!(request) + authenticate!(request) + if body + if body.respond_to?(:read) + request.body_stream = body + request.content_length = body.respond_to?(:lstat) ? body.lstat.size : body.size + else + request.body = body + end + end + http.request(request, &block) + end + + if persistent? + http.start unless http.started? 
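+ # The socket is started once and left open so it can be reused by subsequent requests; the non-persistent branch below opens and closes a connection per request.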
+ requester.call + else + http.start(&requester) + end + rescue Errno::EPIPE, Timeout::Error, Errno::EINVAL + @http = create_connection + attempts == 3 ? raise : (attempts += 1; retry) + end + + def url_for(path, options = {}) + authenticate = options.delete(:authenticated) + # Default to true unless explicitly false + authenticate = true if authenticate.nil? + path = self.class.prepare_path(path) + request = request_method(:get).new(path, {}) + query_string = query_string_authentication(request, options) + returning "#{protocol(options)}#{http.address}#{port_string}#{path}" do |url| + url << "?#{query_string}" if authenticate + end + end + + def subdomain + http.address[/^([^.]+)\.#{DEFAULT_HOST}$/, 1] + end + + def persistent? + options[:persistent] + end + + def protocol(options = {}) + (options[:use_ssl] || http.use_ssl?) ? 'https://' : 'http://' + end + + private + def extract_keys! + missing_keys = [] + extract_key = Proc.new {|key| options[key] || (missing_keys.push(key); nil)} + @access_key_id = extract_key[:access_key_id] + @secret_access_key = extract_key[:secret_access_key] + raise MissingAccessKey.new(missing_keys) unless missing_keys.empty? + end + + def create_connection + http = http_class.new(options[:server], options[:port]) + http.use_ssl = !options[:use_ssl].nil? || options[:port] == 443 + http.verify_mode = OpenSSL::SSL::VERIFY_NONE + http + end + + def http_class + if options.connecting_through_proxy? + Net::HTTP::Proxy(*options.proxy_settings) + else + Net::HTTP + end + end + + def connect + extract_keys! + @http = create_connection + end + + def port_string + default_port = options[:use_ssl] ? 443 : 80 + http.port == default_port ? '' : ":#{http.port}" + end + + def ensure_content_type!(request) + request['Content-Type'] ||= 'binary/octet-stream' + end + + # Just do Header authentication for now + def authenticate!(request) + request['Authorization'] = Authentication::Header.new(request, access_key_id, secret_access_key) + end + + def add_user_agent!(request) + request['User-Agent'] ||= "AWS::S3/#{Version}" + end + + def query_string_authentication(request, options = {}) + Authentication::QueryString.new(request, access_key_id, secret_access_key, options) + end + + def request_method(verb) + Net::HTTP.const_get(verb.to_s.capitalize) + end + + def method_missing(method, *args, &block) + options[method] || super + end + + module Management #:nodoc: + def self.included(base) + base.cattr_accessor :connections + base.connections = {} + base.extend ClassMethods + end + + # Manage the creation and destruction of connections for AWS::S3::Base and its subclasses. Connections are + # created with establish_connection!. + module ClassMethods + # Creates a new connection with which to make requests to the S3 servers for the calling class. + # + # AWS::S3::Base.establish_connection!(:access_key_id => '...', :secret_access_key => '...') + # + # You can set connections for every subclass of AWS::S3::Base. Once the initial connection is made on + # Base, all subsequent connections will inherit whatever values you don't specify explicitly. This allows you to + # customize details of the connection, such as what server the requests are made to, by just specifying one + # option. + # + # AWS::S3::Bucket.establish_connection!(:use_ssl => true) + # + # The Bucket connection would inherit the :access_key_id and the :secret_access_key from + # Base's connection. Unlike the Base connection, all Bucket requests would be made over SSL.
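+ # + # As a further illustration (the server name here is hypothetical), a subclass could route its requests to a virtual-hosted bucket while still inheriting the access keys from Base: + # + # AWS::S3::Bucket.establish_connection!(:server => 'jukebox.s3.amazonaws.com')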
+ # + # == Required arguments + # + # * :access_key_id - The access key id for your S3 account. Provided by Amazon. + # * :secret_access_key - The secret access key for your S3 account. Provided by Amazon. + # + # If any of these required arguments is missing, a MissingAccessKey exception will be raised. + # + # == Optional arguments + # + # * :server - The server to make requests to. You can use this to specify your bucket in the subdomain, + # or your own domain's cname if you are using virtual hosted buckets. Defaults to s3.amazonaws.com. + # * :port - The port to the requests should be made on. Defaults to 80 or 443 if the :use_ssl + # argument is set. + # * :use_ssl - Whether requests should be made over SSL. If set to true, the :port argument + # will be implicitly set to 443, unless specified otherwise. Defaults to false. + # * :persistent - Whether to use a persistent connection to the server. Having this on provides around a two fold + # performance increase but for long running processes some firewalls may find the long lived connection suspicious and close the connection. + # If you run into connection errors, try setting :persistent to false. Defaults to true. + # * :proxy - If you need to connect through a proxy, you can specify your proxy settings by specifying a :host, :port, :user, and :password + # with the :proxy option. + # The :host setting is required if specifying a :proxy. + # + # AWS::S3::Bucket.established_connection!(:proxy => { + # :host => '...', :port => 8080, :user => 'marcel', :password => 'secret' + # }) + def establish_connection!(options = {}) + # After you've already established the default connection, just specify + # the difference for subsequent connections + options = default_connection.options.merge(options) if connected? + connections[connection_name] = Connection.connect(options) + end + + # Returns the connection for the current class, or Base's default connection if the current class does not + # have its own connection. + # + # If not connection has been established yet, NoConnectionEstablished will be raised. + def connection + if connected? + connections[connection_name] || default_connection + else + raise NoConnectionEstablished + end + end + + # Returns true if a connection has been made yet. + def connected? + !connections.empty? + end + + # Removes the connection for the current class. If there is no connection for the current class, the default + # connection will be removed. + def disconnect(name = connection_name) + name = default_connection unless connections.has_key?(name) + connection = connections[name] + connection.http.finish if connection.persistent? + connections.delete(name) + end + + # Clears *all* connections, from all classes, with prejudice. + def disconnect! + connections.each_key {|connection| disconnect(connection)} + end + + private + def connection_name + name + end + + def default_connection_name + 'AWS::S3::Base' + end + + def default_connection + connections[default_connection_name] + end + end + end + + class Options < Hash #:nodoc: + class << self + def valid_options + [:access_key_id, :secret_access_key, :server, :port, :use_ssl, :persistent, :proxy] + end + end + + attr_reader :options + def initialize(options = {}) + super() + @options = options + validate! + extract_proxy_settings! + extract_persistent! + extract_server! + extract_port! + extract_remainder! + end + + def connecting_through_proxy? + !self[:proxy].nil? 
+ end + + def proxy_settings + proxy_setting_keys.map do |proxy_key| + self[:proxy][proxy_key] + end + end + + private + def proxy_setting_keys + [:host, :port, :user, :password] + end + + def missing_proxy_settings? + !self[:proxy].keys.include?(:host) + end + + def extract_persistent! + self[:persistent] = options.has_key?(:persitent) ? options[:persitent] : true + end + + def extract_proxy_settings! + self[:proxy] = options.delete(:proxy) if options.include?(:proxy) + validate_proxy_settings! + end + + def extract_server! + self[:server] = options.delete(:server) || DEFAULT_HOST + end + + def extract_port! + self[:port] = options.delete(:port) || (options[:use_ssl] ? 443 : 80) + end + + def extract_remainder! + update(options) + end + + def validate! + invalid_options = options.keys.select {|key| !self.class.valid_options.include?(key)} + raise InvalidConnectionOption.new(invalid_options) unless invalid_options.empty? + end + + def validate_proxy_settings! + if connecting_through_proxy? && missing_proxy_settings? + raise ArgumentError, "Missing proxy settings. Must specify at least :host." + end + end + end + end + end +end diff --git a/lib/aws/s3/error.rb b/lib/aws/s3/error.rb new file mode 100644 index 0000000..f4c4011 --- /dev/null +++ b/lib/aws/s3/error.rb @@ -0,0 +1,69 @@ +module AWS + module S3 + # Anything you do that makes a request to S3 could result in an error. If it does, the AWS::S3 library will raise an exception + # specific to the error. All exception that are raised as a result of a request returning an error response inherit from the + # ResponseError exception. So should you choose to rescue any such exception, you can simple rescue ResponseError. + # + # Say you go to delete a bucket, but the bucket turns out to not be empty. This results in a BucketNotEmpty error (one of the many + # errors listed at http://docs.amazonwebservices.com/AmazonS3/2006-03-01/ErrorCodeList.html): + # + # begin + # Bucket.delete('jukebox') + # rescue ResponseError => error + # # ... + # end + # + # Once you've captured the exception, you can extract the error message from S3, as well as the full error response, which includes + # things like the HTTP response code: + # + # error + # # => # + # error.message + # # => "The bucket you tried to delete is not empty" + # error.response.code + # # => 409 + # + # You could use this information to redisplay the error in a way you see fit, or just to log the error and continue on. + class Error + #:stopdoc: + attr_accessor :response + def initialize(error, response = nil) + @error = error + @response = response + @container = AWS::S3 + find_or_create_exception! + end + + def raise + Kernel.raise exception.new(message, response) + end + + private + attr_reader :error, :exception, :container + + def find_or_create_exception! + @exception = container.const_defined?(code) ? find_exception : create_exception + end + + def find_exception + exception_class = container.const_get(code) + Kernel.raise ExceptionClassClash.new(exception_class) unless exception_class.ancestors.include?(ResponseError) + exception_class + end + + def create_exception + container.const_set(code, Class.new(ResponseError)) + end + + def method_missing(method, *args, &block) + # We actually want nil if the attribute is nil. So we use has_key? rather than [] + ||. 
+ if error.has_key?(method.to_s) + error[method.to_s] + else + super + end + end + end + end +end +#:startdoc: \ No newline at end of file diff --git a/lib/aws/s3/exceptions.rb b/lib/aws/s3/exceptions.rb new file mode 100644 index 0000000..6e5a09c --- /dev/null +++ b/lib/aws/s3/exceptions.rb @@ -0,0 +1,133 @@ +module AWS + module S3 + + # Abstract super class of all AWS::S3 exceptions + class S3Exception < StandardError + end + + # All responses with a code between 300 and 599 that contain an body are wrapped in an + # ErrorResponse which contains an Error object. This Error class generates a custom exception with the name + # of the xml Error and its message. All such runtime generated exception classes descend from ResponseError + # and contain the ErrorResponse object so that all code that makes a request can rescue ResponseError and get + # access to the ErrorResponse. + class ResponseError < S3Exception + attr_reader :response + def initialize(message, response) + @response = response + super(message) + end + end + + #:stopdoc: + + # Most ResponseError's are created just time on a need to have basis, but we explicitly define the + # InternalError exception because we want to explicitly rescue InternalError in some cases. + class InternalError < ResponseError + end + + class NoSuchKey < ResponseError + end + + class RequestTimeout < ResponseError + end + + # Abstract super class for all invalid options. + class InvalidOption < S3Exception + end + + # Raised if an invalid value is passed to the :access option when creating a Bucket or an S3Object. + class InvalidAccessControlLevel < InvalidOption + def initialize(valid_levels, access_level) + super("Valid access control levels are #{valid_levels.inspect}. You specified `#{access_level}'.") + end + end + + # Raised if either the access key id or secret access key arguments are missing when establishing a connection. + class MissingAccessKey < InvalidOption + def initialize(missing_keys) + key_list = missing_keys.map {|key| key.to_s}.join(' and the ') + super("You did not provide both required access keys. Please provide the #{key_list}.") + end + end + + # Raised if a request is attempted before any connections have been established. + class NoConnectionEstablished < S3Exception + end + + # Raised if an unrecognized option is passed when establishing a connection. + class InvalidConnectionOption < InvalidOption + def initialize(invalid_options) + message = "The following connection options are invalid: #{invalid_options.join(', ')}. " + + "The valid connection options are: #{Connection::Options.valid_options.join(', ')}." + super(message) + end + end + + # Raised if an invalid bucket name is passed when creating a new Bucket. + class InvalidBucketName < S3Exception + def initialize(invalid_name) + message = "`#{invalid_name}' is not a valid bucket name. " + + "Bucket names must be between 3 and 255 bytes and " + + "can contain letters, numbers, dashes and underscores." + super(message) + end + end + + # Raised if an invalid key name is passed when creating an S3Object. + class InvalidKeyName < S3Exception + def initialize(invalid_name) + message = "`#{invalid_name}' is not a valid key name. " + + "Key names must be no more than 1024 bytes long." + super(message) + end + end + + # Raised if an invalid value is assigned to an S3Object's specific metadata name. + class InvalidMetadataValue < S3Exception + def initialize(invalid_names) + message = "The following metadata names have invalid values: #{invalid_names.join(', ')}. 
" + + "Metadata can not be larger than 2kilobytes." + super(message) + end + end + + # Raised if the current bucket can not be inferred when not explicitly specifying the target bucket in the calling + # method's arguments. + class CurrentBucketNotSpecified < S3Exception + def initialize(address) + message = "No bucket name can be inferred from your current connection's address (`#{address}')" + super(message) + end + end + + # Raised when an orphaned S3Object belonging to no bucket tries to access its (non-existant) bucket. + class NoBucketSpecified < S3Exception + def initialize + super('The current object must have its bucket set') + end + end + + # Raised if an attempt is made to save an S3Object that does not have a key set. + class NoKeySpecified < S3Exception + def initialize + super('The current object must have its key set') + end + end + + # Raised if you try to save a deleted object. + class DeletedObject < S3Exception + def initialize + super('You can not save a deleted object') + end + end + + class ExceptionClassClash < S3Exception #:nodoc: + def initialize(klass) + message = "The exception class you tried to create (`#{klass}') exists and is not an exception" + super(message) + end + end + + #:startdoc: + end +end \ No newline at end of file diff --git a/lib/aws/s3/extensions.rb b/lib/aws/s3/extensions.rb new file mode 100644 index 0000000..c246d5d --- /dev/null +++ b/lib/aws/s3/extensions.rb @@ -0,0 +1,323 @@ +#:stopdoc: + +class Hash + def to_query_string(include_question_mark = true) + query_string = '' + unless empty? + query_string << '?' if include_question_mark + query_string << inject([]) do |params, (key, value)| + params << "#{key}=#{value}" + end.join('&') + end + query_string + end + + def to_normalized_options + # Convert all option names to downcased strings, and replace underscores with hyphens + inject({}) do |normalized_options, (name, value)| + normalized_options[name.to_header] = value.to_s + normalized_options + end + end + + def to_normalized_options! + replace(to_normalized_options) + end +end + +class String + def previous! + self[-1] -= 1 + self + end + + def previous + dup.previous! + end + + def to_header + downcase.tr('_', '-') + end + + # ActiveSupport adds an underscore method to String so let's just use that one if + # we find that the method is already defined + def underscore + gsub(/([A-Z]+)([A-Z][a-z])/,'\1_\2'). + gsub(/([a-z\d])([A-Z])/,'\1_\2'). + downcase + end unless public_method_defined? :underscore + + def utf8? + scan(/[^\x00-\xa0]/u) { |s| s.unpack('U') } + true + rescue ArgumentError + false + end + + # All paths in in S3 have to be valid unicode so this takes care of + # cleaning up any strings that aren't valid utf-8 according to String#utf8? + def remove_extended! + gsub!(/[\x80-\xFF]/) { "%02X" % $&[0] } + end + + def remove_extended + dup.remove_extended! + end +end + +class CoercibleString < String + class << self + def coerce(string) + new(string).coerce + end + end + + def coerce + case self + when 'true': true + when 'false': false + # Don't coerce numbers that start with zero + when /^[1-9]+\d*$/: Integer(self) + when datetime_format: Time.parse(self) + else + self + end + end + + private + # Lame hack since Date._parse is so accepting. S3 dates are of the form: '2006-10-29T23:14:47.000Z' + # so unless the string looks like that, don't even try, otherwise it might convert an object's + # key from something like '03 1-2-3-Apple-Tree.mp3' to Sat Feb 03 00:00:00 CST 2001. 
+ def datetime_format + /^\d{4}-\d{2}-\d{2}\w\d{2}:\d{2}:\d{2}/ + end +end + +class Symbol + def to_header + to_s.to_header + end +end + +module Kernel + def __method__(depth = 0) + caller[depth][/`([^']+)'/, 1] + end if RUBY_VERSION < '1.9' + + def memoize(reload = false, storage = nil) + storage = "@#{storage || __method__(1)}" + if reload + instance_variable_set(storage, nil) + else + if cache = instance_variable_get(storage) + return cache + end + end + instance_variable_set(storage, yield) + end + + def require_library_or_gem(library) + require library + rescue LoadError => library_not_installed + begin + require 'rubygems' + require library + rescue LoadError + raise library_not_installed + end + end +end + +class Object + def returning(value) + yield(value) + value + end +end + +class Module + def memoized(method_name) + original_method = "unmemoized_#{method_name}_#{Time.now.to_i}" + alias_method original_method, method_name + module_eval(<<-EVAL, __FILE__, __LINE__) + def #{method_name}(reload = false, *args, &block) + memoize(reload) do + send(:#{original_method}, *args, &block) + end + end + EVAL + end + + def constant(name, value) + unless const_defined?(name) + const_set(name, value) + module_eval(<<-EVAL, __FILE__, __LINE__) + def self.#{name.to_s.downcase} + #{name.to_s} + end + EVAL + end + end + + # Transforms MarcelBucket into + # + # class MarcelBucket < AWS::S3::Bucket + # set_current_bucket_to 'marcel' + # end + def const_missing_from_s3_library(sym) + if sym.to_s =~ /^(\w+)(Bucket|S3Object)$/ + const = const_set(sym, Class.new(AWS::S3.const_get($2))) + const.current_bucket = $1.underscore + const + else + const_missing_not_from_s3_library(sym) + end + end + alias_method :const_missing_not_from_s3_library, :const_missing + alias_method :const_missing, :const_missing_from_s3_library +end + + +class Class # :nodoc: + def cattr_reader(*syms) + syms.flatten.each do |sym| + class_eval(<<-EOS, __FILE__, __LINE__) + unless defined? @@#{sym} + @@#{sym} = nil + end + + def self.#{sym} + @@#{sym} + end + + def #{sym} + @@#{sym} + end + EOS + end + end + + def cattr_writer(*syms) + syms.flatten.each do |sym| + class_eval(<<-EOS, __FILE__, __LINE__) + unless defined? @@#{sym} + @@#{sym} = nil + end + + def self.#{sym}=(obj) + @@#{sym} = obj + end + + def #{sym}=(obj) + @@#{sym} = obj + end + EOS + end + end + + def cattr_accessor(*syms) + cattr_reader(*syms) + cattr_writer(*syms) + end +end if Class.instance_methods(false).grep(/^cattr_(?:reader|writer|accessor)$/).empty? + +module SelectiveAttributeProxy + def self.included(klass) + klass.extend(ClassMethods) + klass.class_eval(<<-EVAL, __FILE__, __LINE__) + cattr_accessor :attribute_proxy + cattr_accessor :attribute_proxy_options + + # Default name for attribute storage + self.attribute_proxy = :attributes + self.attribute_proxy_options = {:exclusively => true} + + private + # By default proxy all attributes + def proxiable_attribute?(name) + return true unless self.class.attribute_proxy_options[:exclusively] + send(self.class.attribute_proxy).has_key?(name) + end + + def method_missing(method, *args, &block) + # Autovivify attribute storage + if method == self.class.attribute_proxy + ivar = "@\#{method}" + instance_variable_set(ivar, {}) unless instance_variable_get(ivar).is_a?(Hash) + instance_variable_get(ivar) + # Delegate to attribute storage + elsif method.to_s =~ /^(\\w+)(=?)$/ && proxiable_attribute?($1) + attributes_hash_name = self.class.attribute_proxy + $2.empty? ? 
send(attributes_hash_name)[$1] : send(attributes_hash_name)[$1] = args.first + else + super + end + end + EVAL + end + + module ClassMethods + def proxy_to(attribute_name, options = {}) + if attribute_name.is_a?(Hash) + options = attribute_name + else + self.attribute_proxy = attribute_name + end + self.attribute_proxy_options = options + end + end +end + +# When streaming data up, Net::HTTPGenericRequest hard codes a chunk size of 1k. For large files this +# is an unfortunately low chunk size, so here we make it use a much larger default size and move it into a method +# so that the implementation of send_request_with_body_stream doesn't need to be changed to change the chunk size (at least not anymore +# than I've already had to...). +module Net + class HTTPGenericRequest + def send_request_with_body_stream(sock, ver, path, f) + raise ArgumentError, "Content-Length not given and Transfer-Encoding is not `chunked'" unless content_length() or chunked? + unless content_type() + warn 'net/http: warning: Content-Type did not set; using application/x-www-form-urlencoded' if $VERBOSE + set_content_type 'application/x-www-form-urlencoded' + end + write_header sock, ver, path + if chunked? + while s = f.read(chunk_size) + sock.write(sprintf("%x\r\n", s.length) << s << "\r\n") + end + sock.write "0\r\n\r\n" + else + while s = f.read(chunk_size) + sock.write s + end + end + end + + def chunk_size + 1048576 # 1 megabyte + end + end + + # Net::HTTP before 1.8.4 doesn't have the use_ssl? method or the Delete request type + class HTTP + def use_ssl? + @use_ssl + end unless public_method_defined? :use_ssl? + + class Delete < HTTPRequest + METHOD = 'DELETE' + REQUEST_HAS_BODY = false + RESPONSE_HAS_BODY = true + end unless const_defined? :Delete + end +end + +class XmlGenerator < String #:nodoc: + attr_reader :xml + def initialize + @xml = Builder::XmlMarkup.new(:indent => 2, :target => self) + super() + build + end +end +#:startdoc: diff --git a/lib/aws/s3/logging.rb b/lib/aws/s3/logging.rb new file mode 100644 index 0000000..6a8cdfc --- /dev/null +++ b/lib/aws/s3/logging.rb @@ -0,0 +1,306 @@ +module AWS + module S3 + # A bucket can be set to log the requests made on it. By default logging is turned off. You can check if a bucket has logging enabled: + # + # Bucket.logging_enabled_for? 'jukebox' + # # => false + # + # Enabling it is easy: + # + # Bucket.enable_logging_for('jukebox') + # + # Unless you specify otherwise, logs will be written to the bucket you want to log. The logs are just like any other object. By default they will start with the prefix 'log-'. You can customize what bucket you want the logs to be delivered to, as well as customize what the log objects' key is prefixed with by setting the target_bucket and target_prefix option: + # + # Bucket.enable_logging_for( + # 'jukebox', 'target_bucket' => 'jukebox-logs' + # ) + # + # Now instead of logging right into the jukebox bucket, the logs will go into the bucket called jukebox-logs. + # + # Once logs have accumulated, you can access them using the logs method: + # + # pp Bucket.logs('jukebox') + # [#, + # #, + # #] + # + # Each log has a lines method that gives you information about each request in that log. All the fields are available + # as named methods. More information is available in Logging::Log::Line. 
+ # + # logs = Bucket.logs('jukebox') + # log = logs.first + # line = log.lines.first + # line.operation + # # => 'REST.GET.LOGGING_STATUS' + # line.request_uri + # # => 'GET /jukebox?logging HTTP/1.1' + # line.remote_ip + # # => "67.165.183.125" + # + # Disabling logging is just as simple as enabling it: + # + # Bucket.disable_logging_for('jukebox') + module Logging + # Logging status captures information about the calling bucket's logging settings. If logging is enabled for the bucket + # the status object will indicate what bucket the logs are written to via the target_bucket method as well as + # the logging key prefix with via target_prefix. + # + # See the documentation for Logging::Management::ClassMethods for more information on how to get the logging status of a bucket. + class Status + include SelectiveAttributeProxy + attr_reader :enabled + alias_method :logging_enabled?, :enabled + + def initialize(attributes = {}) #:nodoc: + attributes = {'target_bucket' => nil, 'target_prefix' => nil}.merge(attributes) + @enabled = attributes.has_key?('logging_enabled') + @attributes = attributes.delete('logging_enabled') || attributes + end + + def to_xml #:nodoc: + Builder.new(self).to_s + end + + private + attr_reader :attributes + + class Builder < XmlGenerator #:nodoc: + attr_reader :logging_status + def initialize(logging_status) + @logging_status = logging_status + super() + end + + def build + xml.tag!('BucketLoggingStatus', 'xmlns' => 'http://s3.amazonaws.com/doc/2006-03-01/') do + if logging_status.target_bucket && logging_status.target_prefix + xml.LoggingEnabled do + xml.TargetBucket logging_status.target_bucket + xml.TargetPrefix logging_status.target_prefix + end + end + end + end + end + end + + # A bucket log exposes requests made on the given bucket. Lines of the log represent a single request. The lines of a log + # can be accessed with the lines method. + # + # log = Bucket.logs_for('marcel').first + # log.lines + # + # More information about the logged requests can be found in the documentation for Log::Line. + class Log + def initialize(log_object) #:nodoc: + @log = log_object + end + + # Returns the lines for the log. Each line is wrapped in a Log::Line. + def lines + log.value.map {|line| Line.new(line)} + end + memoized :lines + + def inspect #:nodoc: + "#<%s:0x%s '%s'>" % [self.class.name, object_id, log.path] + end + + private + attr_reader :log + + # Each line of a log exposes the raw line, but it also has method accessors for all the fields of the logged request. + # + # The list of supported log line fields are listed in the S3 documentation: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/LogFormat.html + # + # line = log.lines.first + # line.remote_ip + # # => '72.21.206.5' + # + # If a certain field does not apply to a given request (for example, the key field does not apply to a bucket request), + # or if it was unknown or unavailable, it will return nil. + # + # line.operation + # # => 'REST.GET.BUCKET' + # line.key + # # => nil + class Line < String + DATE = /\[([^\]]+)\]/ + QUOTED_STRING = /"([^"]+)"/ + REST = /(\S+)/ + LINE_SCANNER = /#{DATE}|#{QUOTED_STRING}|#{REST}/ + + cattr_accessor :decorators + @@decorators = Hash.new {|hash, key| hash[key] = lambda {|entry| CoercibleString.coerce(entry)}} + cattr_reader :fields + @@fields = [] + + class << self + def field(name, offset, type = nil, &block) #:nodoc: + decorators[name] = block if block_given? 
+ fields << name + class_eval(<<-EVAL, __FILE__, __LINE__) + def #{name} + value = parts[#{offset} - 1] + if value == '-' + nil + else + self.class.decorators[:#{name}].call(value) + end + end + memoized :#{name} + EVAL + end + + # Time.parse doesn't like %d/%B/%Y:%H:%M:%S %z so we have to transform it unfortunately + def typecast_time(datetime) #:nodoc: + month = datetime[/[a-z]+/i] + datetime.sub!(%r|^(\w{2})/(\w{3})|, '\2/\1') + datetime.sub!(month, Date::ABBR_MONTHS[month.downcase].to_s) + datetime.sub!(':', ' ') + Time.parse(datetime) + end + end + + def initialize(line) #:nodoc: + super(line) + @parts = parse + end + + field(:owner, 1) {|entry| Owner.new('id' => entry) } + field :bucket, 2 + field(:time, 3) {|entry| typecast_time(entry)} + field :remote_ip, 4 + field(:requestor, 5) {|entry| Owner.new('id' => entry) } + field :request_id, 6 + field :operation, 7 + field :key, 8 + field :request_uri, 9 + field :http_status, 10 + field :error_code, 11 + field :bytes_sent, 12 + field :object_size, 13 + field :total_time, 14 + field :turn_around_time, 15 + field :referrer, 16 + field :user_agent, 17 + + # Returns all fields of the line in a hash of the form :field_name => :field_value. + # + # line.attributes.values_at(:bucket, :key) + # # => ['marcel', 'kiss.jpg'] + def attributes + self.class.fields.inject({}) do |attribute_hash, field| + attribute_hash[field] = send(field) + attribute_hash + end + end + + private + attr_reader :parts + + def parse + scan(LINE_SCANNER).flatten.compact + end + end + end + + module Management #:nodoc: + def self.included(klass) #:nodoc: + klass.extend(ClassMethods) + klass.extend(LoggingGrants) + end + + module ClassMethods + # Returns the logging status for the bucket named name. From the logging status you can determine the bucket logs are delivered to + # and what the bucket object's keys are prefixed with. For more information see the Logging::Status class. + # + # Bucket.logging_status_for 'marcel' + def logging_status_for(name = nil, status = nil) + if name.is_a?(Status) + status = name + name = nil + end + + path = path(name) << '?logging' + status ? put(path, {}, status.to_xml) : Status.new(get(path).parsed) + end + alias_method :logging_status, :logging_status_for + + # Enables logging for the bucket named name. You can specify what bucket to log to with the 'target_bucket' option as well + # as what prefix to add to the log files with the 'target_prefix' option. Unless you specify otherwise, logs will be delivered to + # the same bucket that is being logged and will be prefixed with log-. + def enable_logging_for(name = nil, options = {}) + name = bucket_name(name) + default_options = {'target_bucket' => name, 'target_prefix' => 'log-'} + options = default_options.merge(options) + grant_logging_access_to_target_bucket(options['target_bucket']) + logging_status(name, Status.new(options)) + end + alias_method :enable_logging, :enable_logging_for + + # Disables logging for the bucket named name. + def disable_logging_for(name = nil) + logging_status(bucket_name(name), Status.new) + end + alias_method :disable_logging, :disable_logging_for + + # Returns true if logging has been enabled for the bucket named name. + def logging_enabled_for?(name = nil) + logging_status(bucket_name(name)).logging_enabled? + end + alias_method :logging_enabled?, :logging_enabled_for? + + # Returns the collection of logs for the bucket named name. + # + # Bucket.logs_for 'marcel' + # + # Accepts the same options as Bucket.find, such as :max_keys and :marker. 
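+ # For example (the bucket name and key limit here are illustrative):
+ #
+ #   Bucket.logs_for 'marcel', :max_keys => 10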
+ def logs_for(name = nil, options = {}) + if name.is_a?(Hash) + options = name + name = nil + end + + name = bucket_name(name) + logging_status = logging_status_for(name) + return [] unless logging_status.logging_enabled? + objects(logging_status.target_bucket, options.merge(:prefix => logging_status.target_prefix)).map do |log_object| + Log.new(log_object) + end + end + alias_method :logs, :logs_for + end + + module LoggingGrants #:nodoc: + def grant_logging_access_to_target_bucket(target_bucket) + acl = acl(target_bucket) + acl.grants << ACL::Grant.grant(:logging_write) + acl.grants << ACL::Grant.grant(:logging_read_acp) + acl(target_bucket, acl) + end + end + + def logging_status + self.class.logging_status_for(name) + end + + def enable_logging(*args) + self.class.enable_logging_for(name, *args) + end + + def disable_logging(*args) + self.class.disable_logging_for(name, *args) + end + + def logging_enabled? + self.class.logging_enabled_for?(name) + end + + def logs(options = {}) + self.class.logs_for(name, options) + end + end + end + end +end \ No newline at end of file diff --git a/lib/aws/s3/object.rb b/lib/aws/s3/object.rb new file mode 100644 index 0000000..ba8c51c --- /dev/null +++ b/lib/aws/s3/object.rb @@ -0,0 +1,610 @@ +module AWS + module S3 + # S3Objects represent the data you store on S3. They have a key (their name) and a value (their data). All objects belong to a + # bucket. + # + # You can store an object on S3 by specifying a key, its data and the name of the bucket you want to put it in: + # + # S3Object.store('me.jpg', open('headshot.jpg'), 'photos') + # + # The content type of the object will be inferred by its extension. If the appropriate content type can not be inferred, S3 defaults + # to binary/octect-stream. + # + # If you want to override this, you can explicitly indicate what content type the object should have with the :content_type option: + # + # file = 'black-flowers.m4a' + # S3Object.store( + # file, + # open(file), + # 'jukebox', + # :content_type => 'audio/mp4a-latm' + # ) + # + # You can read more about storing files on S3 in the documentation for S3Object.store. + # + # If you just want to fetch an object you've stored on S3, you just specify its name and its bucket: + # + # picture = S3Object.find 'headshot.jpg', 'photos' + # + # N.B. The actual data for the file is not downloaded in both the example where the file appeared in the bucket and when fetched directly. + # You get the data for the file like this: + # + # picture.value + # + # You can fetch just the object's data directly: + # + # S3Object.value 'headshot.jpg', 'photos' + # + # Or stream it by passing a block to stream: + # + # open('song.mp3', 'w') do |file| + # S3Object.stream('song.mp3', 'jukebox') do |chunk| + # file.write chunk + # end + # end + # + # The data of the file, once download, is cached, so subsequent calls to value won't redownload the file unless you + # tell the object to reload its value: + # + # # Redownloads the file's data + # song.value(:reload) + # + # Other functionality includes: + # + # # Check if an object exists? + # S3Object.exists? 
'headshot.jpg', 'photos' + # + # # Copying an object + # S3Object.copy 'headshot.jpg', 'headshot2.jpg', 'photos' + # + # # Renaming an object + # S3Object.rename 'headshot.jpg', 'portrait.jpg', 'photos' + # + # # Deleting an object + # S3Object.delete 'headshot.jpg', 'photos' + # + # ==== More about objects and their metadata + # + # You can find out the content type of your object with the content_type method: + # + # song.content_type + # # => "audio/mpeg" + # + # You can change the content type as well if you like: + # + # song.content_type = 'application/pdf' + # song.store + # + # (Keep in mind that due to limitiations in S3's exposed API, the only way to change things like the content_type + # is to PUT the object onto S3 again. In the case of large files, this will result in fully re-uploading the file.) + # + # A bevie of information about an object can be had using the about method: + # + # pp song.about + # {"last-modified" => "Sat, 28 Oct 2006 21:29:26 GMT", + # "content-type" => "binary/octect-stream", + # "etag" => "\"dc629038ffc674bee6f62eb64ff3a\"", + # "date" => "Sat, 28 Oct 2006 21:30:41 GMT", + # "x-amz-request-id" => "B7BC68F55495B1C8", + # "server" => "AmazonS3", + # "content-length" => "3418766"} + # + # You can get and set metadata for an object: + # + # song.metadata + # # => {} + # song.metadata[:album] = "A River Ain't Too Much To Love" + # # => "A River Ain't Too Much To Love" + # song.metadata[:released] = 2005 + # pp song.metadata + # {"x-amz-meta-released" => 2005, + # "x-amz-meta-album" => "A River Ain't Too Much To Love"} + # song.store + # + # That metadata will be saved in S3 and is hence forth available from that object: + # + # song = S3Object.find('black-flowers.mp3', 'jukebox') + # pp song.metadata + # {"x-amz-meta-released" => "2005", + # "x-amz-meta-album" => "A River Ain't Too Much To Love"} + # song.metada[:released] + # # => "2005" + # song.metada[:released] = 2006 + # pp song.metada + # {"x-amz-meta-released" => 2006, + # "x-amz-meta-album" => "A River Ain't Too Much To Love"} + class S3Object < Base + class << self + # Returns the value of the object with key in the specified bucket. + # + # === Conditional GET options + # + # * :if_modified_since - Return the object only if it has been modified since the specified time, + # otherwise return a 304 (not modified). + # * :if_unmodified_since - Return the object only if it has not been modified since the specified time, + # otherwise raise PreconditionFailed. + # * :if_match - Return the object only if its entity tag (ETag) is the same as the one specified, + # otherwise raise PreconditionFailed. + # * :if_none_match - Return the object only if its entity tag (ETag) is different from the one specified, + # otherwise return a 304 (not modified). + # + # === Other options + # * :range - Return only the bytes of the object in the specified range. + def value(key, bucket = nil, options = {}, &block) + Value.new(get(path!(bucket, key, options), options, &block)) + end + + def stream(key, bucket = nil, options = {}, &block) + value(key, bucket, options) do |response| + response.read_body(&block) + end + end + + # Returns the object whose key is name in the specified bucket. If the specified key does not + # exist, a NoSuchKey exception will be raised. + def find(key, bucket = nil) + # N.B. This is arguably a hack. From what the current S3 API exposes, when you retrieve a bucket, it + # provides a listing of all the files in that bucket (assuming you haven't limited the scope of what it returns). 
+ # Each file in the listing contains information about that file. It is from this information that an S3Object is built. + # + # If you know the specific file that you want, S3 allows you to make a get request for that specific file and it returns + # the value of that file in its response body. This response body is used to build an S3Object::Value object. + # If you want information about that file, you can make a head request and the headers of the response will contain + # information about that file. There is no way, though, to say, give me the representation of just this given file the same + # way that it would appear in a bucket listing. + # + # When fetching a bucket, you can provide options which narrow the scope of what files should be returned in that listing. + # Of those options, one is marker which is a string and instructs the bucket to return only object's who's key comes after + # the specified marker according to alphabetic order. Another option is max-keys which defaults to 1000 but allows you + # to dictate how many objects should be returned in the listing. With a combination of marker and max-keys you can + # *almost* specify exactly which file you'd like it to return, but marker is not inclusive. In other words, if there is a bucket + # which contains three objects who's keys are respectively 'a', 'b' and 'c', then fetching a bucket listing with marker set to 'b' will only + # return 'c', not 'b'. + # + # Given all that, my hack to fetch a bucket with only one specific file, is to set the marker to the result of calling String#previous on + # the desired object's key, which functionally makes the key ordered one degree higher than the desired object key according to + # alphabetic ordering. This is a hack, but it should work around 99% of the time. I can't think of a scenario where it would return + # something incorrect. + + # We need to ensure the key doesn't have extended characters but not uri escape it before doing the lookup and comparing since if the object exists, + # the key on S3 will have been normalized + key = key.remove_extended unless key.utf8? + bucket = Bucket.find(bucket_name(bucket), :marker => key.previous, :max_keys => 1) + # If our heuristic failed, trigger a NoSuchKey exception + if (object = bucket.objects.first) && object.key == key + object + else + raise NoSuchKey.new("No such key `#{key}'", bucket) + end + end + + # Makes a copy of the object with key to copy_name. + def copy(key, copy_key, bucket = nil, options = {}) + bucket = bucket_name(bucket) + original = open(url_for(key, bucket)) + default_options = {:content_type => original.content_type} + store(copy_key, original, bucket, default_options.merge(options)) + acl(copy_key, bucket, acl(key, bucket)) + end + + # Rename the object with key from to have key in to. + def rename(from, to, bucket = nil, options = {}) + copy(from, to, bucket, options) + delete(from, bucket) + end + + # Fetch information about the object with key from bucket. Information includes content type, content length, + # last modified time, and others. + # + # If the specified key does not exist, NoSuchKey is raised. + def about(key, bucket = nil, options = {}) + response = head(path!(bucket, key, options), options) + raise NoSuchKey.new("No such key `#{key}'", bucket) if response.code == 404 + About.new(response.headers) + end + + # Checks if the object with key in bucket exists. + # + # S3Object.exists? 
'kiss.jpg', 'marcel' + # # => true + def exists?(key, bucket = nil) + about(key, bucket) + true + rescue NoSuchKey + false + end + + # Delete object with key from bucket. + def delete(key, bucket = nil, options = {}) + # A bit confusing. Calling super actually makes an HTTP DELETE request. The delete method is + # defined in the Base class. It happens to have the same name. + super(path!(bucket, key, options), options).success? + end + + # When storing an object on the S3 servers using S3Object.store, the data argument can be a string or an I/O stream. + # If data is an I/O stream it will be read in segments and written to the socket incrementally. This approach + # may be desirable for very large files so they are not read into memory all at once. + # + # # Non streamed upload + # S3Object.store('greeting.txt', 'hello world!', 'marcel') + # + # # Streamed upload + # S3Object.store('roots.mpeg', open('roots.mpeg'), 'marcel') + def store(key, data, bucket = nil, options = {}) + validate_key!(key) + # Must build path before infering content type in case bucket is being used for options + path = path!(bucket, key, options) + infer_content_type!(key, options) + + put(path, options, data) # Don't call .success? on response. We want to get the etag. + end + alias_method :create, :store + alias_method :save, :store + + # All private objects are accessible via an authenticated GET request to the S3 servers. You can generate an + # authenticated url for an object like this: + # + # S3Object.url_for('beluga_baby.jpg', 'marcel_molina') + # + # By default authenticated urls expire 5 minutes after they were generated. + # + # Expiration options can be specified either with an absolute time since the epoch with the :expires options, + # or with a number of seconds relative to now with the :expires_in options: + # + # # Absolute expiration date + # # (Expires January 18th, 2038) + # doomsday = Time.mktime(2038, 1, 18).to_i + # S3Object.url_for('beluga_baby.jpg', + # 'marcel', + # :expires => doomsday) + # + # # Expiration relative to now specified in seconds + # # (Expires in 3 hours) + # S3Object.url_for('beluga_baby.jpg', + # 'marcel', + # :expires_in => 60 * 60 * 3) + # + # You can specify whether the url should go over SSL with the :use_ssl option: + # + # # Url will use https protocol + # S3Object.url_for('beluga_baby.jpg', + # 'marcel', + # :use_ssl => true) + # + # By default, the ssl settings for the current connection will be used. + # + # If you have an object handy, you can use its url method with the same objects: + # + # song.url(:expires_in => 30) + # + # To get an unauthenticated url for the object, such as in the case + # when the object is publicly readable, pass the + # :authenticated option with a value of false. 
+ # + # S3Object.url_for('beluga_baby.jpg', + # 'marcel', + # :authenticated => false) + # # => http://s3.amazonaws.com/marcel/beluga_baby.jpg + def url_for(name, bucket = nil, options = {}) + connection.url_for(path!(bucket, name, options), options) # Do not normalize options + end + + def path!(bucket, name, options = {}) #:nodoc: + # We're using the second argument for options + if bucket.is_a?(Hash) + options.replace(bucket) + bucket = nil + end + '/' << File.join(bucket_name(bucket), name) + end + + private + + def validate_key!(key) + raise InvalidKeyName.new(key) unless key && key.size <= 1024 + end + + def infer_content_type!(key, options) + return if options.has_key?(:content_type) + if mime_type = MIME::Types.type_for(key).first + options[:content_type] = mime_type.content_type + end + end + end + + class Value < String #:nodoc: + attr_reader :response + def initialize(response) + super(response.body) + @response = response + end + end + + class About < Hash #:nodoc: + def initialize(headers) + super() + replace(headers) + metadata + end + + def [](header) + super(header.to_header) + end + + def []=(header, value) + super(header.to_header, value) + end + + def to_headers + self.merge(metadata.to_headers) + end + + def metadata + Metadata.new(self) + end + memoized :metadata + end + + class Metadata < Hash #:nodoc: + HEADER_PREFIX = 'x-amz-meta-' + SIZE_LIMIT = 2048 # 2 kilobytes + + def initialize(headers) + @headers = headers + super() + extract_metadata! + end + + def []=(header, value) + super(header_name(header.to_header), value) + end + + def [](header) + super(header_name(header.to_header)) + end + + def to_headers + validate! + self + end + + private + attr_reader :headers + + def extract_metadata! + headers.keys.grep(Regexp.new(HEADER_PREFIX)).each do |metadata_header| + self[metadata_header] = headers.delete(metadata_header) + end + end + + def header_name(name) + name =~ Regexp.new(HEADER_PREFIX) ? name : [HEADER_PREFIX, name].join + end + + def validate! + invalid_headers = inject([]) do |invalid, (name, value)| + invalid << name unless valid?(value) + invalid + end + + raise InvalidMetadataValue.new(invalid_headers) unless invalid_headers.empty? + end + + def valid?(value) + value && value.size < SIZE_LIMIT + end + end + + attr_writer :value #:nodoc: + + # Provides readers and writers for all valid header settings listed in valid_header_settings. + # Subsequent saves to the object after setting any of the valid headers settings will be reflected in + # information about the object. + # + # some_s3_object.content_type + # => nil + # some_s3_object.content_type = 'text/plain' + # => "text/plain" + # some_s3_object.content_type + # => "text/plain" + # some_s3_object.store + # S3Object.about(some_s3_object.key, some_s3_object.bucket.name)['content-type'] + # => "text/plain" + include SelectiveAttributeProxy #:nodoc + + proxy_to :about, :exclusively => false + + # Initializes a new S3Object. + def initialize(attributes = {}, &block) + super + self.value = attributes.delete(:value) + self.bucket = attributes.delete(:bucket) + yield self if block_given? + end + + # The current object's bucket. If no bucket has been set, a NoBucketSpecified exception will be raised. For + # cases where you are not sure if the bucket has been set, you can use the belongs_to_bucket? method. + def bucket + @bucket or raise NoBucketSpecified + end + + # Sets the bucket that the object belongs to. 
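+ # For example (assuming a bucket named 'photos' already exists):
+ #
+ #   object.bucket = Bucket.find('photos')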
+ def bucket=(bucket) + @bucket = bucket + self + end + + # Returns true if the current object has been assigned to a bucket yet. Objects must belong to a bucket before they + # can be saved onto S3. + def belongs_to_bucket? + !@bucket.nil? + end + alias_method :orphan?, :belongs_to_bucket? + + # Returns the key of the object. If the key is not set, a NoKeySpecified exception will be raised. For cases + # where you are not sure if the key has been set, you can use the key_set? method. Objects must have a key + # set to be saved onto S3. Objects which have already been saved onto S3 will always have their key set. + def key + attributes['key'] or raise NoKeySpecified + end + + # Sets the key for the current object. + def key=(value) + attributes['key'] = value + end + + # Returns true if the current object has had its key set yet. Objects which have already been saved will + # always return true. This method is useful for objects which have not been saved yet so you know if you + # need to set the object's key since you can not save an object unless its key has been set. + # + # object.store if object.key_set? && object.belongs_to_bucket? + def key_set? + !attributes['key'].nil? + end + + # Lazily loads object data. + # + # Force a reload of the data by passing :reload. + # + # object.value(:reload) + # + # When loading the data for the first time you can optionally yield to a block which will + # allow you to stream the data in segments. + # + # object.value do |segment| + # send_data segment + # end + # + # The full list of options are listed in the documentation for its class method counter part, S3Object::value. + def value(options = {}, &block) + if options.is_a?(Hash) + reload = !options.empty? + else + reload = options + options = {} + end + memoize(reload) do + self.class.stream(key, bucket.name, options, &block) + end + end + + # Interface to information about the current object. Information is read only, though some of its data + # can be modified through specific methods, such as content_type and content_type=. + # + # pp some_object.about + # {"last-modified" => "Sat, 28 Oct 2006 21:29:26 GMT", + # "x-amz-id-2" => "LdcQRk5qLwxJQiZ8OH50HhoyKuqyWoJ67B6i+rOE5MxpjJTWh1kCkL+I0NQzbVQn", + # "content-type" => "binary/octect-stream", + # "etag" => "\"dc629038ffc674bee6f62eb68454ff3a\"", + # "date" => "Sat, 28 Oct 2006 21:30:41 GMT", + # "x-amz-request-id" => "B7BC68F55495B1C8", + # "server" => "AmazonS3", + # "content-length" => "3418766"} + # + # some_object.content_type + # # => "binary/octect-stream" + # some_object.content_type = 'audio/mpeg' + # some_object.content_type + # # => 'audio/mpeg' + # some_object.store + def about + stored? ? self.class.about(key, bucket.name) : About.new + end + memoized :about + + # Interface to viewing and editing metadata for the current object. To be treated like a Hash. + # + # some_object.metadata + # # => {} + # some_object.metadata[:author] = 'Dave Thomas' + # some_object.metadata + # # => {"x-amz-meta-author" => "Dave Thomas"} + # some_object.metadata[:author] + # # => "Dave Thomas" + def metadata + about.metadata + end + memoized :metadata + + # Saves the current object with the specified options. Valid options are listed in the documentation for S3Object::store. + def store(options = {}) + raise DeletedObject if frozen? + options = about.to_headers.merge(options) if stored? + response = self.class.store(key, value, bucket.name, options) + bucket.update(:stored, self) + response.success? 
+ end + alias_method :create, :store + alias_method :save, :store + + # Deletes the current object. Trying to save an object after it has been deleted with + # raise a DeletedObject exception. + def delete + bucket.update(:deleted, self) + freeze + self.class.delete(key, bucket.name) + end + + # Copies the current object, given it the name copy_name. Keep in mind that due to limitations in + # S3's API, this operation requires retransmitting the entire object to S3. + def copy(copy_name, options = {}) + self.class.copy(key, copy_name, bucket.name, options) + end + + # Rename the current object. Keep in mind that due to limitations in S3's API, this operation requires + # retransmitting the entire object to S3. + def rename(to, options = {}) + self.class.rename(key, to, bucket.name, options) + end + + def etag(reload = false) + return nil unless stored? + memoize(reload) do + reload ? about(reload)['etag'][1...-1] : attributes['e_tag'][1...-1] + end + end + + # Returns the owner of the current object. + def owner + Owner.new(attributes['owner']) + end + memoized :owner + + # Generates an authenticated url for the current object. Accepts the same options as its class method + # counter part S3Object.url_for. + def url(options = {}) + self.class.url_for(key, bucket.name, options) + end + + # Returns true if the current object has been stored on S3 yet. + def stored? + !attributes['e_tag'].nil? + end + + def ==(s3object) #:nodoc: + path == s3object.path + end + + def path #:nodoc: + self.class.path!( + belongs_to_bucket? ? bucket.name : '(no bucket)', + key_set? ? key : '(no key)' + ) + end + + # Don't dump binary data :) + def inspect #:nodoc: + "#<%s:0x%s '%s'>" % [self.class, object_id, path] + end + + private + def proxiable_attribute?(name) + valid_header_settings.include?(name) + end + + def valid_header_settings + %w(cache_control content_type content_length content_md5 content_disposition content_encoding expires) + end + end + end +end diff --git a/lib/aws/s3/owner.rb b/lib/aws/s3/owner.rb new file mode 100644 index 0000000..5033810 --- /dev/null +++ b/lib/aws/s3/owner.rb @@ -0,0 +1,44 @@ +module AWS + module S3 + # Entities in S3 have an associated owner (the person who created them). The owner is a canonical representation of an + # entity in the S3 system. It has an id and a display_name. + # + # These attributes can be used when specifying a ACL::Grantee for an ACL::Grant. + # + # You can retrieve the owner of the current account by calling Owner.current. + class Owner + undef_method :id # Get rid of Object#id + include SelectiveAttributeProxy + + class << self + # The owner of the current account. 
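+ # For example (the display name shown is purely illustrative):
+ #
+ #   Owner.current.display_name
+ #   # => 'marcel'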
+ def current + response = Service.get('/') + new(response.parsed['owner']) if response.parsed['owner'] + end + memoized :current + end + + def initialize(attributes = {}) #:nodoc: + @attributes = attributes + end + + def ==(other_owner) #:nodoc: + hash == other_owner.hash + end + + def hash #:nodoc + [id, display_name].join.hash + end + + private + def proxiable_attribute?(name) + valid_attributes.include?(name) + end + + def valid_attributes + %w(id display_name) + end + end + end +end \ No newline at end of file diff --git a/lib/aws/s3/parsing.rb b/lib/aws/s3/parsing.rb new file mode 100644 index 0000000..c74679b --- /dev/null +++ b/lib/aws/s3/parsing.rb @@ -0,0 +1,99 @@ +#:stopdoc: +module AWS + module S3 + module Parsing + class << self + def parser=(parsing_library) + XmlParser.parsing_library = parsing_library + end + + def parser + XmlParser.parsing_library + end + end + + module Typecasting + def typecast(object) + case object + when Hash + typecast_hash(object) + when Array + object.map {|element| typecast(element)} + when String + CoercibleString.coerce(object) + else + object + end + end + + def typecast_hash(hash) + if content = hash['__content__'] + typecast(content) + else + keys = hash.keys.map {|key| key.underscore} + values = hash.values.map {|value| typecast(value)} + keys.inject({}) do |new_hash, key| + new_hash[key] = values.slice!(0) + new_hash + end + end + end + end + + class XmlParser < Hash + include Typecasting + + class << self + attr_accessor :parsing_library + end + + attr_reader :body, :xml_in, :root + + def initialize(body) + @body = body + unless body.strip.empty? + parse + set_root + typecast_xml_in + end + end + + private + + def parse + @xml_in = self.class.parsing_library.xml_in(body, parsing_options) + end + + def parsing_options + { + # Includes the enclosing tag as the top level key + 'keeproot' => true, + # Makes tag value available via the '__content__' key + 'contentkey' => '__content__', + # Always parse tags into a hash, even when there are no attributes + # (unless there is also no value, in which case it is nil) + 'forcecontent' => true, + # If a tag is empty, makes its content nil + 'suppressempty' => nil, + # Force nested elements to be put into an array, even if there is only one of them + 'forcearray' => ['Contents', 'Bucket', 'Grant'] + } + end + + def set_root + @root = @xml_in.keys.first.underscore + end + + def typecast_xml_in + typecast_xml = {} + @xml_in.dup.each do |key, value| # Some typecasting is destructive so we dup + typecast_xml[key.underscore] = typecast(value) + end + # An empty body will try to update with a string so only update if the result is a hash + update(typecast_xml[root]) if typecast_xml[root].is_a?(Hash) + end + end + end + end +end +#:startdoc: \ No newline at end of file diff --git a/lib/aws/s3/response.rb b/lib/aws/s3/response.rb new file mode 100644 index 0000000..fa3b8a1 --- /dev/null +++ b/lib/aws/s3/response.rb @@ -0,0 +1,180 @@ +#:stopdoc: +module AWS + module S3 + class Base + class Response < String + attr_reader :response, :body, :parsed + def initialize(response) + @response = response + @body = response.body.to_s + super(body) + end + + def headers + headers = {} + response.each do |header, value| + headers[header] = value + end + headers + end + memoized :headers + + def [](header) + headers[header] + end + + def each(&block) + headers.each(&block) + end + + def code + response.code.to_i + end + + {:success => 200..299, :redirect => 300..399, + :client_error => 400..499, :server_error => 
500..599}.each do |result, code_range| + class_eval(<<-EVAL, __FILE__, __LINE__) + def #{result}? + return false unless response + (#{code_range}).include? code + end + EVAL + end + + def error? + !success? && response['content-type'] == 'application/xml' && parsed.root == 'error' + end + + def error + Error.new(parsed, self) + end + memoized :error + + def parsed + # XmlSimple is picky about what kind of object it parses, so we pass in body rather than self + Parsing::XmlParser.new(body) + end + memoized :parsed + + def inspect + "#<%s:0x%s %s %s>" % [self.class, object_id, response.code, response.message] + end + end + end + + class Bucket + class Response < Base::Response + def bucket + parsed + end + end + end + + class S3Object + class Response < Base::Response + def etag + headers['etag'][1...-1] + end + end + end + + class Service + class Response < Base::Response + def empty? + parsed['buckets'].nil? + end + + def buckets + parsed['buckets']['bucket'] || [] + end + end + end + + module ACL + class Policy + class Response < Base::Response + alias_method :policy, :parsed + end + end + end + + # Requests whose response code is between 300 and 599 and contain an in their body + # are wrapped in an Error::Response. This Error::Response contains an Error object which raises an exception + # that corresponds to the error in the response body. The exception object contains the ErrorResponse, so + # in all cases where a request happens, you can rescue ResponseError and have access to the ErrorResponse and + # its Error object which contains information about the ResponseError. + # + # begin + # Bucket.create(..) + # rescue ResponseError => exception + # exception.response + # # => + # exception.response.error + # # => + # end + class Error + class Response < Base::Response + def error? + true + end + + def inspect + "#<%s:0x%s %s %s: '%s'>" % [self.class.name, object_id, response.code, error.code, error.message] + end + end + end + + # Guess response class name from current class name. If the guessed response class doesn't exist + # do the same thing to the current class's parent class, up the inheritance heirarchy until either + # a response class is found or until we get to the top of the heirarchy in which case we just use + # the the Base response class. + # + # Important: This implemantation assumes that the Base class has a corresponding Base::Response. + class FindResponseClass #:nodoc: + class << self + def for(start) + new(start).find + end + end + + def initialize(start) + @container = AWS::S3 + @current_class = start + end + + def find + self.current_class = current_class.superclass until response_class_found? + target.const_get(class_to_find) + end + + private + attr_reader :container + attr_accessor :current_class + + def target + container.const_get(current_name) + end + + def target? + container.const_defined?(current_name) + end + + def response_class_found? + target? && target.const_defined?(class_to_find) + end + + def class_to_find + :Response + end + + def current_name + truncate(current_class) + end + + def truncate(klass) + klass.name[/[^:]+$/] + end + end + end +end +#:startdoc: \ No newline at end of file diff --git a/lib/aws/s3/service.rb b/lib/aws/s3/service.rb new file mode 100644 index 0000000..e68a80a --- /dev/null +++ b/lib/aws/s3/service.rb @@ -0,0 +1,51 @@ +module AWS + module S3 + # The service lets you find out general information about your account, like what buckets you have. 
+ # + # Service.buckets + # # => [] + class Service < Base + @@response = nil #:nodoc: + + class << self + # List all your buckets. + # + # Service.buckets + # # => [] + # + # For performance reasons, the bucket list will be cached. If you want avoid all caching, pass the :reload + # as an argument: + # + # Service.buckets(:reload) + def buckets + response = get('/') + if response.empty? + [] + else + response.buckets.map {|attributes| Bucket.new(attributes)} + end + end + memoized :buckets + + # Sometimes methods that make requests to the S3 servers return some object, like a Bucket or an S3Object. + # Othertimes they return just true. Other times they raise an exception that you may want to rescue. Despite all these + # possible outcomes, every method that makes a request stores its response object for you in Service.response. You can always + # get to the last request's response via Service.response. + # + # objects = Bucket.objects('jukebox') + # Service.response.success? + # # => true + # + # This is also useful when an error exception is raised in the console which you weren't expecting. You can + # root around in the response to get more details of what might have gone wrong. + def response + @@response + end + + def response=(response) #:nodoc: + @@response = response + end + end + end + end +end \ No newline at end of file diff --git a/lib/aws/s3/version.rb b/lib/aws/s3/version.rb new file mode 100644 index 0000000..4b8b7a2 --- /dev/null +++ b/lib/aws/s3/version.rb @@ -0,0 +1,12 @@ +module AWS + module S3 + module VERSION #:nodoc: + MAJOR = '0' + MINOR = '4' + TINY = '0' + BETA = nil # Time.now.to_i.to_s + end + + Version = [VERSION::MAJOR, VERSION::MINOR, VERSION::TINY, VERSION::BETA].compact * '.' + end +end diff --git a/site/index.erb b/site/index.erb new file mode 100644 index 0000000..17712de --- /dev/null +++ b/site/index.erb @@ -0,0 +1,41 @@ + + + + + + + AWS::S3 - Ruby Library for Amazon Simple Storage Service (S3) + + + + + + +
+
+ +
+
+
+

AWS::S3

+

A Ruby Library for Amazon's Simple
Storage Service's (S3) REST API.

+ +

Download with RubyGems

+
sudo gem i aws-s3
+

Download from the svn repository

+
svn co svn://rubyforge.org/var/svn/amazon/s3/trunk aws
+
+ +
+ +

Readme

+

Getting started

+<%= erb_data[:readme] %> + + diff --git a/site/public/images/box-and-gem.gif b/site/public/images/box-and-gem.gif new file mode 100644 index 0000000000000000000000000000000000000000..ed71134204e3e8c6286f286e8eb7fddb367cb02e GIT binary patch literal 7201 zcmWlbc|6mP-Bg&9#0)T?aQh*AAnq72LOs6Jm?VQEd~HDIDA|Z%SuaI z|NHlFbaX3BxeEr`tyesl8Q;j@+8+D8lh60-#+9uArQ@wFx{Kp6lUXD#eDFPYIy7`W z3G>I>W9~np&4Kl?Q8*hbOZwm z+F4!MY-u?djobR!vmV2<5zfbokKfAWob%M*?CM(m`enVe^mu>&U}173KYvJ4^muJ> zBT!&7`SyoX5-IGU$;Z6=D=YhheMj@nR1j$IlV$}Pz1MtccYb~}CHar7)$a84znrW| z)7v|5(feIpTQTQ1y>P2Pf2_W**zbP4OQ|^CTlrU5u-Qssy6`6>5bputkdgOW@8*7c z8*6uW-IMRzEuLM*pALQfbZ+)`d)xTa3QM0_w@c&?*4Ab!-c3Dmp3F!M;o#W+YC0Di z{Xtb_qKXp1!!ues&uv3hl%Fo{^LJ!(Wvi>a{Eq5R9?u>{EF2r1D zK|tm+(w0(GcS_y1(*--?;_|e#rd6)SvI8Z8AlAbCL}Kh}a?)&S%9_8%PdVv21Y+rX zXXR;`Df^4e@84EZWoiV$i=G~t2*ge)=X7Izj~IWO3Vc{qlvzW`f&+^+6~E5NZ)D(D zhxb^+%&hU0jDuU6?ykB1v;b(Pi7u;1%>v^uj?S~CB+oppS?l5?p?3RC!LdS{hLtPv5~t_eP9Y-G}U_ZJrrmj3P5NG?5<+Fe_l z@>XE|{Q39$_t*ccs|3wru_X4O!2e6}|Ly-j0U%fai~WQyCATdU!K?6~lalv20&_|~ zOSdZjYmAVx%UEYs!M6nL&FB+))rH@aQ&273UFb@y z0O!KG6IE`#D(Llh+o%r|8Nw<%Jck(FE+dvse0KXpQlV>6?840>T(xdntXftVC@Z1+q+LH_a)39%@S~dMyB|Z{FF&@n z9)vkp_Wm&nI6Ml!7WnNCvA1eRTw{)1hY>;_hl~V!IOSnO`D`0vPI{au2@m4;PXB!t z33bQbu`rmQjPe_Yr1^i9(oDCJ>zGdvcinvc#@@P>Wa^^iJ^x6K(HV0Y)Dvvq37a4??-)510Z23D0#&i{|KkR@)s&aM#4;Z0mfb5^-DR`NXc%7Zjl z4>|G3Xi2GvlO3}##fR-Y4KW#IF4B(zGbThPY8Dz-O5K80&x$~^7u_w>8b-$p zq~Ewzr#Tekx4YgnnN>9}XXwAz7f$=`(&SpR(3Nqzte&*U70DG{jt}28D_RZM87#BU zRG4M_{m`f+v}VG)0Z%R4luQ?RhYE5v?S!n{IeSxsH1#A_LGbH(h~J&6BDbL79|Jvn zB)Qa|8ggR=y;ngGCuv#U;kAbPcRRfX4AkB34ssY8)D5ZFwB`;Qcp4;+Ktp;;>E{#G zb4TOy(aFCKtHx_?R;7Lp9zQW%aS8rokVU(#tb+60(Iu?PHe`x>0wfLvfLDuOzfsax z)Xf~|oDsT^zlTn^temYA-uuGm6}6-IlC^g1U0)Gh#VRcYNe<=PinZTvwk?Yq(|Uwj zrZ-IH*W)G%vuAL3BViXB!M*FuMzA(dK`ar(gLRbvgXmMtV4*Nf{;i!bR|@Ei9W(TV z4z*l1Aoe!SL>eAWwgw?aLaTd@>zcUJZrPb&!i>637K= zoeF_wFrZf%5tg|(F(PAFIE$C2!4*Vc_te$k?`Lv~5qgRJ);6Gry6DT)V2)?IV_ITD zVkds;MDUJjaVe8yog*bdj(qu;nB6uv;!W)6E^y)^=Br$^M6{xpgg{9Z>~d~9pi-D2 zI0Mm^+122l*M*(Umx$IzPA0lMDBw294GSJ4f+tUf>|VseuJoITm<*m0pTtFOxb=}j zrKH$;7@S(#r%-*DH7|r=k-FMRp>j;D8bS$j$=(B^Nh_39A8l9P=H}n(LqrjxC0%ln7Zh(y}-K zPJlTs&$eJVjM>W92t3WJ)CbZEkih6YR8ZUz>HZkc7k4y9I+J7_%q=7U`9KDE7>O`3 zjH!rSI7EfuX9h@lRv3KFdk~co_0w;-=J^(zRcZ#pVz#ETI!pSDg#10Q6i8?lAL3ft z03Bh~#ltfdWhLHq(yoC4a{A2R{Kd?^UXd5Pom-)*N#}nhkWwnE2j42*KhU*&Y7AzP ziHLgYI9v@6Wt`Mj5W^4+ytz2%z2+@ga(&*U(lcMq6uMuJ8QWKQ^`Wq6*7K&wLzCMt z3hVg(itQ6y>D<;r^v_~)cz0`%ZJxL}5vGFEm?TwSYK+VHFQCom!L&pE$E^xY=_`~6 zEl51zH*?y{Ck7yyGb_QEb>4byX zB@4`b@CV6Z(na0aKP^FgZw}(l#=I>zTCBI>l+*<{{Xj@DIOB)YHubeSmMGEe%K}Pp zH~dFMig%Sh)JOzXr<|5}EPoZxoo`Tas_{B*R^2n`6WQ2s{%_#QDP^+K`4g0mF0 zf56Ez{4@tq{D*3duJ2Ozb&kIVk?FmGD;sYhXPjC?YH6p3l^)qe1^*M1>`%$5mHXkO zE%=Bx(@l&@h6BXc)^wwrQ_Sq_02k98VHhpGUU)@eUgMMBr>dZ{*Vam%AWR0PX?siM zuIRY`#9f31HH~a#?__C`T5>B{c{XF8dpMhDTK|aopQ+FCkBk!}DvuL@)k`%v<+_)pq}9G80DqviBV5G6Wy>=zVq zLGNTmIaix0adoQz%m8)IG1gr~#J)xtwfsA%@^P|m-+nr)AxzhERB$?+=;>%-{&~uw zB{d~nQ~9>DeA$JVoU`Qy;Xv zLtJ*`D5P?B%KERruzfCwHf%7Z%{?0!5PU4$In@r}6u0&o8EWaEn?EP{9eu6F?rW5AA>elk9R#_53*5&kqR z+K=Ikh;-Eres+N#&Or{frU&KWgKIDO`#!g|!2Rbl6mUHIQY{z6B7+lhLG?JuaxVPn zeu%VCIH)9qZ$@Lsv`aA++e-bMzt%sZqL0xKF3(>qUD7;v=@Xks_6HZmqQ z67Iu59LNyY;Bk3^VU+tJYZN#O;1IGl?+*n@r@+?85dm`HLAo|Hf^9zo{f!*Q-WXSd zh)4LwOgEB!@Ejj89O@LGBq!2Pef&|H=@EuY83%t3fO*KUXcf!quaKjLgwm4eAs<^! 
zj61Y695xz`h$5dMfH*9Y^DuxW3kz$Z$9yb_kdpQ2nIS2Y!2%Sv8a6PN0@bsytYY}e zmIO}BIj~BSWU=nNb5>(Q$x5T(2cuvWd3NgsL>&fLV}hD6i9__5>=%Jv_Y=EiJ@DHk zc`{6rnDYMfLrccP32Cs=?#r=}1U|D6O#+lVH5@00k;qM*pr^(zfX@PHWZ^V-8cGer zr8>&h@GbetLTIWSU?9jrW5CcjxGo+TuY1Mdzgm3?TulrCmqhRDLN76W$PuaMzo$2j zg5&qn6BodS7fXdQ(u1cPdqM}PIrTATBoAcL)$fE+HH-68uj$qtGE zg5-&V^e`DY^iw?7pVBx9T+V~2oLOh~E4h#~Tpn*UP{-u7q2;VvWT~mXx#t6A0c?`S z=+V2`N2(5w>+P;>rv%E`%aukb(gF7W$f#fGoQq&SWZo4#dkQ9x+lBo+Uj5%f&Kh3l zs%m1iV1_c2OMwXaO~_XhD7s2=xGocZRWD9GH-gQFNYaapEX}->QE+9kAceuE)>VMP zK$ftep;KT@zt^wj6Ki#dOV|(-3}B>CWZLwWEC6Jb_}tfzE2BXS=r0xLLj)7~l|9p0 zEG(zZmAq>rrH+eiDf*=d0I|_oEWDVbmaEr>dFv^XA&td6r)Zcoy>%9FZWM5?*n0(D zbO`MVt&&Cg5n$eVd~#C~cP2}k<%<)W3LLu%tO#r=NKg?DSz!$l!h(c;<$!3VqCkd1 zBS@Bt0K0?ZW}QGdbV6}ilDBhWpO&_~{I7e?h1B-jQ+0bx9E#XH*h%&4rQ3UJ+OvfdX|09}%9U%z{k#YpZcjElEZ<@U`_3NjfpLq0uBwCP?{osUfW~{-BX8`r$ki_-zcA-$FPMpj`?e9vY&6SOW-$x8g$B$a@tS}4A(t&o|{#26NxG@Pl^){nhl9S`{|ZNd9WNNWbhRf zW)1Y-#pu}sfi&PAnJpkPcv{BuKS9s*sH8z+?Zi?8n1zS-S)~S3k9j!aq~ct=1^^L`^*!V2p!ElhM)u*nc<0N!N!SLx9E<1s$}OVK}O- z(bG#lGKn7{eX?x>A6NX@Hh;Hmio)$=pcRak(=(rxDe#25@B$|CJr&|-o|H54GUOF? zr_hX$z&3wBuKMsto)1y7?!n4mcz;&k7orUM+G)|Zt7_^k)uyg5JyC!FQI zke=2ke+=~=@oTuWI`%bcj^;vhO5$Nfk!}E<5c7`9Y9&r5fK_UdVxN${?KySXZ^C4d z5#ig5L#Xc|NE443HwII&o#CmdZMh^}8cGNiSCU?M)2!*?3j}njmGY#c3=egLfg=}r z(%;F?C6z}=@S_-zdCHG@+V?ky&|Z$7e#IUP1u}<2w`(GJ7|8H&P{nO^ZwpHT5S__2V8ls1RiDU1E z;ZaO7O6Nn$i-m_(1Ruk6k7c>Ify=I{&;6s`cAs#o3KB(tXeb`*@~b9lO}B7vdA$c? zKZJ4Yz!4N-jJ(K1gIRs>5aU8vIGZh#)0shg`#M9;{^yWs-(h4D*G@uJ^k;noG%skAN9qf#JwfA1gEd?S}uA#eME=_RzjD5@pQ=H-Wsl38$cMXPDyY6i`E; z-5r(z`*_3SLy#m6S!*HC*?J;~@N1U>>5aqWqeU~YcVQ}8 z1RBb&_^spic+FV;g4)EOWyrQYdW4L~L4Pm9a7pGuR}BO^4KOGQ1W*0dmyMYr!Y*eI zoUMh4GVDydDvVu!?mHF5Tymu`#{IOQ!^DolcgS(k@0v{Dg7IW<+tkUQm}#OVn>5;Q z^MX!<#@x~cja29))q#FTPrph7 zsv>+^)T|jSyqfQ zR^4&}-Ll)NeJ9Ca43w0AS z*)$0g)i`ugyaQkey%@CMj05?9Ups&s8iobtrOut>qr zP7(u@5!pvLhkjk-K0(k6Bckaz3kdNpoz^*I`Va9Y`j>3qY!AaZ0Bw49b>nF^l#HyR zLFhDuTHUI^iR@EicMuMB??kSqleHC z{^(614bBpx)#78bCxrij*}RcYh(`VT*~)bz@g0+MTV9pxbh2^7XGjPWa3e2U8*wL4 z;r3KxopWg4@`P_I+wPxsjs8H`Q;q+6Uge`g)EKZ3%EMbks52QANdysd7hYcH{>g;d z6E;<~&=KUtYRC7{tj#M&yNPdhYH8?k2FwNn^UH||kjazU4HuoBv#J>e?-CFd%}G7E)S(l!SMi|?IQVdot->BLRSMZT93$ThDb z=EVr2-Ns77Y_hK~Bo9wn^iS)h$lrT!KiWTYD_t>jOT%-DR4n?8rg^7-;BAu0^-#_u zME|_eo5U26J0el?-{M~7TQyzaG)Q&L=+Lm`A6U9ivHAMmOFLmi)XI4PdH27s%a)Dq ze_Z;@7S6=!UMQn6e!p7f6S)gTk-VJsZFJlDPREBhOayz_NgEX_?)`{Wdc5K~%c00j zk(;`aAaL?)t#hA~NP*3`hKf1Gf zTM=g*p-4VUhbn2R1vE=X9m;EpYyBbOMbY5VWk|N~Hm2vDVs zficH7hLXRZX$jmf%LAd!i8ne}$4^3HyvIH#-VpOmIrnjIp4Wq95vfQM5VoH;))UBZ z^$-`h5NV>XP*ey*hJNRs)3-Yvk#=!shD}{-REF+7)pNT?w?*r=11`y0^`liy7d`XdUtrfv^K&W7b~w&(&V3nAeT`_^{_iz{Y6koFzPTiLB7fkle28-W z@UJJ0Q!9RR<`itMH#zDTbna{?z;$NQ^eZkpPb?o=Ara^2+q;cfoj%|kE%YeCslAL%C znhwi%e)&wqYi4(;mGpAeeXHYx%F2&^drLd4m%x9OfzbnEFTp-|sMV@?qK4=J%>3Lx ay^dh6LTk1&J{lYj#-XB8|KS1P>i+?+SZ2Ba literal 0 HcmV?d00001 diff --git a/site/public/images/favicon.ico b/site/public/images/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..a8ff2561d4a86c1b11aa47325e50be29afac6dde GIT binary patch literal 1406 zcmeIvYfwyK7zglwt!>NJT9wK*BDs^+(vn=VHkXoI3aQ+>xU-T7x$jV!ahc*nluE0~ zEyk2)ltIG`(}$WdQ!S<$CPSsmIcG2DcuvL)AAIu7c+dRa=lws=%z0@@ijss zDTTXdDP3|0(}V6KD*8Un${}$phquoo$XzNRbE-sAauq_f)mWgdfj0Ch*2g}Fzv=}7 z)UOZ|_ZnW2Z;+Py25L<`!Xg@wnbiQ@lDD{=+lZ`e6OvNiLy^&fYxylup8JUU`j1G= z_ymLD3mTig!Xvj0CB}9%o7$0ZsRJ(CJE1w-33Wjis%yG|>pg@4@<{;$yBiE7#J-R! 
zkrJ6$ix@Z%8R=Wh!1*Qv7os4_5(chD2JW{QsO~YyCf;Q%M&4oJcbCPaau$Fc z8`<&73`Lm*^4k`q*IAI)XhmL=6$Q;!6q&3j`(VYh&o{H z*Tti!hX>2@Xy2Ow!yne+~fC^l5{;qr6XFuMuKQ-M)HS_)tHkgGB0PIJz@_$;_40)ssgK z9_WgG2lcaNFARy95#=N4@9S+R+pG`Kh0a?a7Cq5L;W@a9q%z<)NuPr zlyyOCBWLTjO&vF7*q|-?Idd1UNJ@wd4O(+Xyln5%_{5FNmTyXqjYt!7`tEhX8m(3{ hFK*8XF&hkNyEZIZ7`NlVkJP*G-}g)S{rI :test + +Rake::TestTask.new do |test| + test.pattern = 'test/*_test.rb' + test.verbose = true +end + + +Rake::RDocTask.new do |rdoc| + rdoc.rdoc_dir = 'doc' + rdoc.title = "FasterXmlSimple, a libxml based replacement for XmlSimple" + rdoc.options << '--line-numbers' << '--inline-source' + rdoc.rdoc_files.include('README') + rdoc.rdoc_files.include('COPYING') + rdoc.rdoc_files.include('lib/**/*.rb') +end + +namespace :dist do + + spec = Gem::Specification.new do |s| + s.name = 'faster_xml_simple' + s.version = Gem::Version.new(FasterXmlSimple::Version) + s.summary = "A libxml based replacement for XmlSimple" + s.description = s.summary + s.email = 'michael@koziarski.com' + s.author = 'Michael Koziarski' + s.has_rdoc = true + s.extra_rdoc_files = %w(README COPYING) + s.homepage = 'http://fasterxs.rubyforge.org' + s.rubyforge_project = 'fasterxs' + s.files = FileList['Rakefile', 'lib/**/*.rb'] + s.test_files = Dir['test/**/*'] + + s.add_dependency 'libxml-ruby', '>= 0.3.8.4' + s.rdoc_options = ['--title', "", + '--main', 'README', + '--line-numbers', '--inline-source'] + end + Rake::GemPackageTask.new(spec) do |pkg| + pkg.need_tar_gz = true + pkg.package_files.include('{lib,test}/**/*') + pkg.package_files.include('README') + pkg.package_files.include('COPYING') + pkg.package_files.include('Rakefile') + end +end \ No newline at end of file diff --git a/support/faster-xml-simple/lib/faster_xml_simple.rb b/support/faster-xml-simple/lib/faster_xml_simple.rb new file mode 100644 index 0000000..98326ed --- /dev/null +++ b/support/faster-xml-simple/lib/faster_xml_simple.rb @@ -0,0 +1,187 @@ +# +# Copyright (c) 2006 Michael Koziarski +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of +# this software and associated documentation files (the "Software"), to deal in the +# Software without restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the +# Software, and to permit persons to whom the Software is furnished to do so, +# subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +require 'rubygems' +require 'xml/libxml' + +class FasterXmlSimple + Version = '0.5.0' + class << self + # Take an string containing XML, and returns a hash representing that + # XML document. 
For example: + # + # FasterXmlSimple.xml_in("1") + # {"root"=>{"something"=>{"__content__"=>"1"}}} + # + # Faster XML Simple is designed to be a drop in replacement for the xml_in + # functionality of http://xml-simple.rubyforge.org + # + # The following options are supported: + # + # * contentkey: The key to use for the content of text elements, + # defaults to '\_\_content__' + # * forcearray: The list of elements which should always be returned + # as arrays. Under normal circumstances single element arrays are inlined. + # * suppressempty: The value to return for empty elements, pass +true+ + # to remove empty elements entirely. + # * keeproot: By default the hash returned has a single key with the + # name of the root element. If the name of the root element isn't + # interesting to you, pass +false+. + # * forcecontent: By default a text element with no attributes, will + # be collapsed to just a string instead of a hash with a single key. + # Pass +true+ to prevent this. + # + # + def xml_in(string, options={}) + new(string, options).out + end + end + + def initialize(string, options) #:nodoc: + @doc = parse(string) + @options = default_options.merge options + end + + def out #:nodoc: + if @options['keeproot'] + {@doc.root.name => collapse(@doc.root)} + else + collapse(@doc.root) + end + end + + private + def default_options + {'contentkey' => '__content__', 'forcearray' => [], 'keeproot'=>true} + end + + def collapse(element) + result = hash_of_attributes(element) + if text_node? element + text = collapse_text(element) + result[content_key] = text if text =~ /\S/ + elsif element.children? + element.inject(result) do |hash, child| + unless child.text? + child_result = collapse(child) + (hash[child.name] ||= []) << child_result + end + hash + end + end + if result.empty? + return empty_element + end + # Compact them to ensure it complies with the user's requests + inline_single_element_arrays(result) + remove_empty_elements(result) if suppress_empty? + if content_only?(result) && !force_content? + result[content_key] + else + result + end + end + + def content_only?(result) + result.keys == [content_key] + end + + def content_key + @options['contentkey'] + end + + def force_array?(key_name) + Array(@options['forcearray']).include?(key_name) + end + + def inline_single_element_arrays(result) + result.each do |key, value| + if value.size == 1 && value.is_a?(Array) && !force_array?(key) + result[key] = value.first + end + end + end + + def remove_empty_elements(result) + result.each do |key, value| + if value == empty_element + result.delete key + end + end + end + + def suppress_empty? + @options['suppressempty'] == true + end + + def empty_element + if !@options.has_key? 'suppressempty' + {} + else + @options['suppressempty'] + end + end + + # removes the content if it's nothing but blanks, prevents + # the hash being polluted with lots of content like "\n\t\t\t" + def suppress_empty_content(result) + result.delete content_key if result[content_key] !~ /\S/ + end + + def force_content? + @options['forcecontent'] + end + + # a text node is one with 1 or more child nodes which are + # text nodes, and no non-text children, there's no sensible + # way to support nodes which are text and markup like: + #

<p>Something <b>Bold</b> </p>
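+ # (for illustration: an element like <name>Marcel</name> does count as a text
+ # node, since its only child is text, while the mixed example above does not,
+ # because <b> is a non-text child)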

+ def text_node?(element) + !element.text? && element.all? {|c| c.text?} + end + + # takes a text node, and collapses it into a string + def collapse_text(element) + element.map {|c| c.content } * '' + end + + def hash_of_attributes(element) + result = {} + element.each_attr do |attribute| + name = attribute.name + name = [attribute.ns, attribute.name].join(':') if attribute.ns? + result[name] = attribute.value + end + result + end + + def parse(string) + if string == '' + string = ' ' + end + XML::Parser.string(string).parse + end +end + +class XmlSimple # :nodoc: + def self.xml_in(*args) + FasterXmlSimple.xml_in *args + end +end \ No newline at end of file diff --git a/support/faster-xml-simple/test/fixtures/test-1.rails.yml b/support/faster-xml-simple/test/fixtures/test-1.rails.yml new file mode 100644 index 0000000..b058f3e --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-1.rails.yml @@ -0,0 +1,4 @@ +--- +something: + something-else: + __content__: testing diff --git a/support/faster-xml-simple/test/fixtures/test-1.xml b/support/faster-xml-simple/test/fixtures/test-1.xml new file mode 100644 index 0000000..cd2c07b --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-1.xml @@ -0,0 +1,3 @@ + + testing + \ No newline at end of file diff --git a/support/faster-xml-simple/test/fixtures/test-1.yml b/support/faster-xml-simple/test/fixtures/test-1.yml new file mode 100644 index 0000000..502e7f8 --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-1.yml @@ -0,0 +1,4 @@ +--- +something: + something-else: + - __content__: testing diff --git a/support/faster-xml-simple/test/fixtures/test-2.rails.yml b/support/faster-xml-simple/test/fixtures/test-2.rails.yml new file mode 100644 index 0000000..d5f09dc --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-2.rails.yml @@ -0,0 +1,6 @@ +--- +something: + something-else: + __content__: testing + child_attribute: "15" + root_attribute: "12" diff --git a/support/faster-xml-simple/test/fixtures/test-2.xml b/support/faster-xml-simple/test/fixtures/test-2.xml new file mode 100644 index 0000000..5405647 --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-2.xml @@ -0,0 +1,3 @@ + + testing + \ No newline at end of file diff --git a/support/faster-xml-simple/test/fixtures/test-2.yml b/support/faster-xml-simple/test/fixtures/test-2.yml new file mode 100644 index 0000000..89c3d11 --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-2.yml @@ -0,0 +1,6 @@ +--- +something: + something-else: + - __content__: testing + child_attribute: "15" + root_attribute: "12" diff --git a/support/faster-xml-simple/test/fixtures/test-3.rails.yml b/support/faster-xml-simple/test/fixtures/test-3.rails.yml new file mode 100644 index 0000000..dddff1e --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-3.rails.yml @@ -0,0 +1,6 @@ +--- +something: + something-else: + __content__: "\n\ + \t\ttesting\n\ + \t" diff --git a/support/faster-xml-simple/test/fixtures/test-3.xml b/support/faster-xml-simple/test/fixtures/test-3.xml new file mode 100644 index 0000000..6e77829 --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-3.xml @@ -0,0 +1,5 @@ + + + testing + + \ No newline at end of file diff --git a/support/faster-xml-simple/test/fixtures/test-3.yml b/support/faster-xml-simple/test/fixtures/test-3.yml new file mode 100644 index 0000000..ae1e05e --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-3.yml @@ -0,0 +1,6 @@ +--- +something: + something-else: + - __content__: "\n\ + 
\t\ttesting\n\ + \t" diff --git a/support/faster-xml-simple/test/fixtures/test-4.rails.yml b/support/faster-xml-simple/test/fixtures/test-4.rails.yml new file mode 100644 index 0000000..7e9e06c --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-4.rails.yml @@ -0,0 +1,5 @@ +--- +something: + something-else: + another_child: + attribute: "4" diff --git a/support/faster-xml-simple/test/fixtures/test-4.xml b/support/faster-xml-simple/test/fixtures/test-4.xml new file mode 100644 index 0000000..ff98fa6 --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-4.xml @@ -0,0 +1,7 @@ + + + testing + + testing + + \ No newline at end of file diff --git a/support/faster-xml-simple/test/fixtures/test-4.yml b/support/faster-xml-simple/test/fixtures/test-4.yml new file mode 100644 index 0000000..8d62c26 --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-4.yml @@ -0,0 +1,5 @@ +--- +something: + something-else: + - another_child: + attribute: "4" diff --git a/support/faster-xml-simple/test/fixtures/test-5.rails.yml b/support/faster-xml-simple/test/fixtures/test-5.rails.yml new file mode 100644 index 0000000..c8e4387 --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-5.rails.yml @@ -0,0 +1,8 @@ +--- +something: + something-else: + - __content__: testing + - __content__: testing + - __content__: testing + - __content__: testing + - __content__: testing diff --git a/support/faster-xml-simple/test/fixtures/test-5.xml b/support/faster-xml-simple/test/fixtures/test-5.xml new file mode 100644 index 0000000..3a22ab2 --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-5.xml @@ -0,0 +1,7 @@ + + testing + testing + testing + testing + testing + \ No newline at end of file diff --git a/support/faster-xml-simple/test/fixtures/test-5.yml b/support/faster-xml-simple/test/fixtures/test-5.yml new file mode 100644 index 0000000..c8e4387 --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-5.yml @@ -0,0 +1,8 @@ +--- +something: + something-else: + - __content__: testing + - __content__: testing + - __content__: testing + - __content__: testing + - __content__: testing diff --git a/support/faster-xml-simple/test/fixtures/test-6.rails.yml b/support/faster-xml-simple/test/fixtures/test-6.rails.yml new file mode 100644 index 0000000..1ceb02c --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-6.rails.yml @@ -0,0 +1,43 @@ +--- +ListBucketResult: + Prefix: {} + + Name: + __content__: projectionist + MaxKeys: + __content__: "1000" + Contents: + - StorageClass: + __content__: STANDARD + Owner: + DisplayName: + __content__: noradio + ID: + __content__: bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + Size: + __content__: "186870" + ETag: + __content__: "\"2ac1aa042e20ab7e1a9879b0df9f17b7\"" + LastModified: + __content__: "2006-11-15T05:49:39.000Z" + Key: + __content__: 1973-plymouth-satellite-sebring.jpg + - StorageClass: + __content__: STANDARD + Owner: + DisplayName: + __content__: noradio + ID: + __content__: bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + Size: + __content__: "43562" + ETag: + __content__: "\"4ead118ba91491f9c9697153264a1943\"" + LastModified: + __content__: "2006-11-15T05:51:20.000Z" + Key: + __content__: 37-cluster.jpg + Marker: {} + + IsTruncated: + __content__: "false" diff --git a/support/faster-xml-simple/test/fixtures/test-6.xml b/support/faster-xml-simple/test/fixtures/test-6.xml new file mode 100644 index 0000000..10967ee --- /dev/null +++ 
b/support/faster-xml-simple/test/fixtures/test-6.xml @@ -0,0 +1,29 @@ + + projectionist + + + 1000 + false + + 1973-plymouth-satellite-sebring.jpg + 2006-11-15T05:49:39.000Z + "2ac1aa042e20ab7e1a9879b0df9f17b7" + 186870 + + bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + noradio + + STANDARD + + + 37-cluster.jpg + 2006-11-15T05:51:20.000Z + "4ead118ba91491f9c9697153264a1943" + 43562 + + bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + noradio + + STANDARD + + \ No newline at end of file diff --git a/support/faster-xml-simple/test/fixtures/test-6.yml b/support/faster-xml-simple/test/fixtures/test-6.yml new file mode 100644 index 0000000..f180c4e --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-6.yml @@ -0,0 +1,41 @@ +--- +ListBucketResult: + Prefix: + Name: + __content__: projectionist + MaxKeys: + __content__: "1000" + Contents: + - StorageClass: + __content__: STANDARD + Owner: + DisplayName: + __content__: noradio + ID: + __content__: bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + Size: + __content__: "186870" + ETag: + __content__: "\"2ac1aa042e20ab7e1a9879b0df9f17b7\"" + LastModified: + __content__: "2006-11-15T05:49:39.000Z" + Key: + __content__: 1973-plymouth-satellite-sebring.jpg + - StorageClass: + __content__: STANDARD + Owner: + DisplayName: + __content__: noradio + ID: + __content__: bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + Size: + __content__: "43562" + ETag: + __content__: "\"4ead118ba91491f9c9697153264a1943\"" + LastModified: + __content__: "2006-11-15T05:51:20.000Z" + Key: + __content__: 37-cluster.jpg + Marker: + IsTruncated: + __content__: "false" diff --git a/support/faster-xml-simple/test/fixtures/test-7.rails.yml b/support/faster-xml-simple/test/fixtures/test-7.rails.yml new file mode 100644 index 0000000..1e2c149 --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-7.rails.yml @@ -0,0 +1,23 @@ +--- +AccessControlPolicy: + AccessControlList: + Grant: + - Permission: + __content__: FULL_CONTROL + Grantee: + DisplayName: + __content__: noradio + ID: + __content__: bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + xsi:type: CanonicalUser + - Permission: + __content__: READ + Grantee: + URI: + __content__: http://acs.amazonaws.com/groups/global/AllUsers + xsi:type: Group + Owner: + DisplayName: + __content__: noradio + ID: + __content__: bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 diff --git a/support/faster-xml-simple/test/fixtures/test-7.xml b/support/faster-xml-simple/test/fixtures/test-7.xml new file mode 100644 index 0000000..f3ce35e --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-7.xml @@ -0,0 +1,22 @@ + + + + bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + noradio + + + + + bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + noradio + + FULL_CONTROL + + + + http://acs.amazonaws.com/groups/global/AllUsers + + READ + + + \ No newline at end of file diff --git a/support/faster-xml-simple/test/fixtures/test-7.yml b/support/faster-xml-simple/test/fixtures/test-7.yml new file mode 100644 index 0000000..41d0254 --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-7.yml @@ -0,0 +1,22 @@ +AccessControlPolicy: + Owner: + DisplayName: + __content__: noradio + ID: + __content__: bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + AccessControlList: + Grant: + - Permission: + __content__: FULL_CONTROL + Grantee: + DisplayName: + __content__: noradio + ID: + 
__content__: bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + xsi:type: CanonicalUser + - Permission: + __content__: READ + Grantee: + URI: + __content__: http://acs.amazonaws.com/groups/global/AllUsers + xsi:type: Group diff --git a/support/faster-xml-simple/test/fixtures/test-8.rails.yml b/support/faster-xml-simple/test/fixtures/test-8.rails.yml new file mode 100644 index 0000000..7893b45 --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-8.rails.yml @@ -0,0 +1,14 @@ +--- +topic: + parent-id: {} + + title: {} + + approved: + type: boolean + id: + type: integer + viewed-at: + type: datetime + written-on: + type: date diff --git a/support/faster-xml-simple/test/fixtures/test-8.xml b/support/faster-xml-simple/test/fixtures/test-8.xml new file mode 100644 index 0000000..1191e8f --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-8.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/support/faster-xml-simple/test/fixtures/test-8.yml b/support/faster-xml-simple/test/fixtures/test-8.yml new file mode 100644 index 0000000..8ed125b --- /dev/null +++ b/support/faster-xml-simple/test/fixtures/test-8.yml @@ -0,0 +1,11 @@ +topic: + title: + id: + type: integer + approved: + type: boolean + parent-id: + viewed-at: + type: datetime + written-on: + type: date \ No newline at end of file diff --git a/support/faster-xml-simple/test/regression_test.rb b/support/faster-xml-simple/test/regression_test.rb new file mode 100644 index 0000000..5fde070 --- /dev/null +++ b/support/faster-xml-simple/test/regression_test.rb @@ -0,0 +1,47 @@ +require File.dirname(__FILE__) + '/test_helper' + +class RegressionTest < FasterXSTest + def test_content_nil_regressions + expected = {"asdf"=>{"jklsemicolon"=>{}}} + assert_equal expected, FasterXmlSimple.xml_in("") + assert_equal expected, FasterXmlSimple.xml_in("", 'forcearray'=>['asdf']) + end + + def test_s3_regression + str = File.read("test/fixtures/test-7.xml") + assert_nil FasterXmlSimple.xml_in(str)["AccessControlPolicy"]["AccessControlList"]["__content__"] + end + + def test_xml_simple_transparency + assert_equal XmlSimple.xml_in(""), FasterXmlSimple.xml_in("") + end + + def test_suppress_empty_variations + str = "" + + assert_equal Hash.new, FasterXmlSimple.xml_in(str)["asdf"]["fdsa"] + assert_nil FasterXmlSimple.xml_in(str, 'suppressempty'=>nil)["asdf"]["fdsa"] + assert_equal '', FasterXmlSimple.xml_in(str, 'suppressempty'=>'')["asdf"]["fdsa"] + assert !FasterXmlSimple.xml_in(str, 'suppressempty'=>true)["asdf"].has_key?("fdsa") + end + + def test_empty_string_doesnt_crash + assert_raise(XML::Parser::ParseError) do + silence_stderr do + FasterXmlSimple.xml_in('') + end + end + end + + def test_keeproot_false + str = "1" + expected = {"fdsa"=>"1"} + assert_equal expected, FasterXmlSimple.xml_in(str, 'keeproot'=>false) + end + + def test_keeproot_false_with_force_content + str = "1" + expected = {"fdsa"=>{"__content__"=>"1"}} + assert_equal expected, FasterXmlSimple.xml_in(str, 'keeproot'=>false, 'forcecontent'=>true) + end +end \ No newline at end of file diff --git a/support/faster-xml-simple/test/test_helper.rb b/support/faster-xml-simple/test/test_helper.rb new file mode 100644 index 0000000..3e62384 --- /dev/null +++ b/support/faster-xml-simple/test/test_helper.rb @@ -0,0 +1,17 @@ + +require 'test/unit' +require 'faster_xml_simple' + +class FasterXSTest < Test::Unit::TestCase + def default_test + end + + def silence_stderr + str = STDERR.dup + STDERR.reopen("/dev/null") + STDERR.sync=true + 
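+ # run the given block with STDERR redirected to /dev/null; the original stream is restored in the ensure clause below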
yield + ensure + STDERR.reopen(str) + end +end diff --git a/support/faster-xml-simple/test/xml_simple_comparison_test.rb b/support/faster-xml-simple/test/xml_simple_comparison_test.rb new file mode 100644 index 0000000..13dcc55 --- /dev/null +++ b/support/faster-xml-simple/test/xml_simple_comparison_test.rb @@ -0,0 +1,46 @@ +require File.dirname(__FILE__) + '/test_helper' +require 'yaml' + +class XmlSimpleComparisonTest < FasterXSTest + + # Define test methods + + Dir["test/fixtures/test-*.xml"].each do |file_name| + xml_file_name = file_name + method_name = File.basename(file_name, ".xml").gsub('-', '_') + yml_file_name = file_name.gsub('xml', 'yml') + rails_yml_file_name = file_name.gsub('xml', 'rails.yml') + class_eval <<-EOV, __FILE__, __LINE__ + def #{method_name} + assert_equal YAML.load(File.read('#{yml_file_name}')), + FasterXmlSimple.xml_in(File.read('#{xml_file_name}'), default_options ) + end + + def #{method_name}_rails + assert_equal YAML.load(File.read('#{rails_yml_file_name}')), + FasterXmlSimple.xml_in(File.read('#{xml_file_name}'), rails_options) + end + EOV + end + + def default_options + { + 'keeproot' => true, + 'contentkey' => '__content__', + 'forcecontent' => true, + 'suppressempty' => nil, + 'forcearray' => ['something-else'] + } + end + + def rails_options + { + 'forcearray' => false, + 'forcecontent' => true, + 'keeproot' => true, + 'contentkey' => '__content__' + } + end + + +end \ No newline at end of file diff --git a/support/rdoc/code_info.rb b/support/rdoc/code_info.rb new file mode 100644 index 0000000..e9b20d3 --- /dev/null +++ b/support/rdoc/code_info.rb @@ -0,0 +1,211 @@ +require 'rubygems' +require 'rake' +require 'rdoc/rdoc' + +module RDoc + class CodeInfo + class << self + def parse(wildcard_pattern = nil) + @info_for_corpus = parse_files(wildcard_pattern) + end + + def for(constant) + new(constant).info + end + + def info_for_corpus + raise RuntimeError, "You must first generate a corpus to search by using RDoc::CodeInfo.parse" unless @info_for_corpus + @info_for_corpus + end + + def parsed_files + info_for_corpus.map {|info| info.file_absolute_name} + end + + def files_to_parse + @files_to_parse ||= Rake::FileList.new + end + + private + def parse_files(pattern) + files = pattern ? Rake::FileList[pattern] : files_to_parse + options = Options.instance + options.parse(files << '-q', RDoc::GENERATORS) + rdoc.send(:parse_files, options) + end + + def rdoc + TopLevel.reset + rdoc = RDoc.new + stats = Stats.new + # We don't want any output so we'll override the print method + stats.instance_eval { def print; nil end } + rdoc.instance_variable_set(:@stats, stats) + rdoc + end + end + + attr_reader :info + def initialize(location) + @location = CodeLocation.new(location) + find_constant + find_method if @location.has_method? + end + + private + attr_reader :location + attr_writer :info + def find_constant + parts = location.namespace_parts + self.class.info_for_corpus.each do |file_info| + @info = parts.inject(file_info) do |result, const_part| + (result.find_module_named(const_part) || result.find_class_named(const_part)) || break + end + return if info + end + end + + def find_method + return unless info + self.info = info.method_list.detect do |method_info| + next unless method_info.name == location.method_name + if location.class_method? + method_info.singleton + elsif location.instance_method? 
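+ # locations written with '#' (e.g. Foo::Bar#baz) refer to instance methods, so only non-singleton methods match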
+ !method_info.singleton + else + true + end + end + end + end + + class CodeLocation + attr_reader :location + + def initialize(location) + @location = location + end + + def parts + location.split(/::|\.|#/) + end + + def namespace_parts + has_method? ? parts[0...-1] : parts + end + + def has_method? + ('a'..'z').include?(parts.last[0, 1]) + end + + def instance_method? + !location['#'].nil? + end + + def class_method? + has_method? && !location[/#|\./] + end + + def method_name + parts.last if has_method? + end + end +end + +if __FILE__ == $0 + require 'test/unit' + class CodeInfoTest < Test::Unit::TestCase + def setup + RDoc::CodeInfo.parse(__FILE__) + end + + def test_constant_lookup + assert RDoc::CodeInfo.for('RDoc') + + info = RDoc::CodeInfo.for('RDoc::CodeInfo') + assert_equal 'CodeInfo', info.name + end + + def test_method_lookup + {'RDoc::CodeInfo.parse' => true, + 'RDoc::CodeInfo::parse' => true, + 'RDoc::CodeInfo#parse' => false, + 'RDoc::CodeInfo.find_method' => true, + 'RDoc::CodeInfo::find_method' => false, + 'RDoc::CodeInfo#find_method' => true, + 'RDoc::CodeInfo#no_such_method' => false, + 'RDoc::NoSuchConst#foo' => false}.each do |location, result_of_lookup| + assert_equal result_of_lookup, !RDoc::CodeInfo.for(location).nil? + end + end + end + + class CodeLocationTest < Test::Unit::TestCase + def test_parts + {'Foo' => %w(Foo), + 'Foo::Bar' => %w(Foo Bar), + 'Foo::Bar#baz' => %w(Foo Bar baz), + 'Foo::Bar.baz' => %w(Foo Bar baz), + 'Foo::Bar::baz' => %w(Foo Bar baz), + 'Foo::Bar::Baz' => %w(Foo Bar Baz)}.each do |location, parts| + assert_equal parts, RDoc::CodeLocation.new(location).parts + end + end + + def test_namespace_parts + {'Foo' => %w(Foo), + 'Foo::Bar' => %w(Foo Bar), + 'Foo::Bar#baz' => %w(Foo Bar), + 'Foo::Bar.baz' => %w(Foo Bar), + 'Foo::Bar::baz' => %w(Foo Bar), + 'Foo::Bar::Baz' => %w(Foo Bar Baz)}.each do |location, namespace_parts| + assert_equal namespace_parts, RDoc::CodeLocation.new(location).namespace_parts + end + end + + def test_has_method? + {'Foo' => false, + 'Foo::Bar' => false, + 'Foo::Bar#baz' => true, + 'Foo::Bar.baz' => true, + 'Foo::Bar::baz' => true, + 'Foo::Bar::Baz' => false}.each do |location, has_method_result| + assert_equal has_method_result, RDoc::CodeLocation.new(location).has_method? + end + end + + def test_instance_method? + {'Foo' => false, + 'Foo::Bar' => false, + 'Foo::Bar#baz' => true, + 'Foo::Bar.baz' => false, + 'Foo::Bar::baz' => false, + 'Foo::Bar::Baz' => false}.each do |location, is_instance_method| + assert_equal is_instance_method, RDoc::CodeLocation.new(location).instance_method? + end + end + + def test_class_method? + {'Foo' => false, + 'Foo::Bar' => false, + 'Foo::Bar#baz' => false, + 'Foo::Bar.baz' => false, + 'Foo::Bar::baz' => true, + 'Foo::Bar::Baz' => false}.each do |location, is_class_method| + assert_equal is_class_method, RDoc::CodeLocation.new(location).class_method? 
+ end + end + + def test_method_name + {'Foo' => nil, + 'Foo::Bar' => nil, + 'Foo::Bar#baz' => 'baz', + 'Foo::Bar.baz' => 'baz', + 'Foo::Bar::baz' => 'baz', + 'Foo::Bar::Baz' => nil}.each do |location, method_name| + assert_equal method_name, RDoc::CodeLocation.new(location).method_name + end + end + end +end \ No newline at end of file diff --git a/test/acl_test.rb b/test/acl_test.rb new file mode 100644 index 0000000..434d80d --- /dev/null +++ b/test/acl_test.rb @@ -0,0 +1,254 @@ +require File.dirname(__FILE__) + '/test_helper' + +class PolicyReadingTest < Test::Unit::TestCase + + def setup + @policy = prepare_policy + end + + def test_policy_owner + assert_kind_of Owner, @policy.owner + assert_equal 'bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1', @policy.owner.id + assert_equal 'mmolina@onramp.net', @policy.owner.display_name + end + + def test_grants + assert @policy.grants + assert !@policy.grants.empty? + grant = @policy.grants.first + assert_kind_of ACL::Grant, grant + assert_equal 'FULL_CONTROL', grant.permission + end + + def test_grants_have_grantee + grant = @policy.grants.first + assert grantee = grant.grantee + assert_kind_of ACL::Grantee, grantee + assert_equal 'bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1', grantee.id + assert_equal 'mmolina@onramp.net', grantee.display_name + assert_equal 'CanonicalUser', grantee.type + end + + def test_grantee_always_responds_to_email_address + assert_nothing_raised do + @policy.grants.first.grantee.email_address + end + end + + private + def prepare_policy + ACL::Policy.new(parsed_policy) + end + + def parsed_policy + Parsing::XmlParser.new Fixtures::Policies.policy_with_one_grant + end +end + +class PolicyWritingTest < PolicyReadingTest + + def setup + policy = prepare_policy + # Dump the policy to xml and retranslate it back from the xml then run all the tests in the xml reading + # test. This round tripping indirectly asserts that the original xml document is the same as the to_xml + # dump. + @policy = ACL::Policy.new(Parsing::XmlParser.new(policy.to_xml)) + end + +end + +class PolicyTest < Test::Unit::TestCase + def test_building_policy_by_hand + policy = grant = grantee = nil + assert_nothing_raised do + policy = ACL::Policy.new + grant = ACL::Grant.new + grantee = ACL::Grantee.new + grantee.email_address = 'marcel@vernix.org' + grant.permission = 'READ_ACP' + grant.grantee = grantee + policy.grants << grant + policy.owner = Owner.new('id' => '123456789', 'display_name' => 'noradio') + end + + assert_nothing_raised do + policy.to_xml + end + + assert !policy.grants.empty? + assert_equal 1, policy.grants.size + assert_equal 'READ_ACP', policy.grants.first.permission + end + + def test_include? 
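+ # the grants collection matches both ACL::Grant instances and stock grant names such as :public_read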
+ policy = ACL::Policy.new(Parsing::XmlParser.new(Fixtures::Policies.policy_with_one_grant)) + assert !policy.grants.include?(:public_read) + policy.grants << ACL::Grant.grant(:public_read) + assert policy.grants.include?(:public_read) + + assert policy.grants.include?(ACL::Grant.grant(:public_read)) + [false, 1, '1'].each do |non_grant| + assert !policy.grants.include?(non_grant) + end + end + + def test_delete + policy = ACL::Policy.new(Parsing::XmlParser.new(Fixtures::Policies.policy_with_one_grant)) + policy.grants << ACL::Grant.grant(:public_read) + assert policy.grants.include?(:public_read) + assert policy.grants.delete(:public_read) + assert !policy.grants.include?(:public_read) + [false, 1, '1'].each do |non_grant| + assert_nil policy.grants.delete(non_grant) + end + end + + def test_grant_list_comparison + policy = ACL::Policy.new + policy2 = ACL::Policy.new + + grant_names = [:public_read, :public_read_acp, :authenticated_write] + grant_names.each {|grant_name| policy.grants << ACL::Grant.grant(grant_name)} + grant_names.reverse_each {|grant_name| policy2.grants << ACL::Grant.grant(grant_name)} + + assert_equal policy.grants, policy2.grants + end +end + +class GrantTest < Test::Unit::TestCase + def test_permission_must_be_valid + grant = ACL::Grant.new + assert_nothing_raised do + grant.permission = 'READ_ACP' + end + + assert_raises(InvalidAccessControlLevel) do + grant.permission = 'not a valid permission' + end + end + + def test_stock_grants + assert_raises(ArgumentError) do + ACL::Grant.grant :this_is_not_a_stock_grant + end + + grant = nil + assert_nothing_raised do + grant = ACL::Grant.grant(:public_read) + end + + assert grant + assert_kind_of ACL::Grant, grant + assert_equal 'READ', grant.permission + assert grant.grantee + assert_kind_of ACL::Grantee, grant.grantee + assert_equal 'AllUsers', grant.grantee.group + end +end + +class GranteeTest < Test::Unit::TestCase + def test_type_inference + grantee = ACL::Grantee.new + + assert_nothing_raised do + grantee.type + end + + assert_nil grantee.type + grantee.group = 'AllUsers' + assert_equal 'AllUsers', grantee.group + assert_equal 'Group', grantee.type + grantee.email_address = 'marcel@vernix.org' + assert_equal 'AmazonCustomerByEmail', grantee.type + grantee.display_name = 'noradio' + assert_equal 'AmazonCustomerByEmail', grantee.type + grantee.id = '123456789' + assert_equal 'CanonicalUser', grantee.type + end + + def test_type_is_extracted_if_present + grantee = ACL::Grantee.new('xsi:type' => 'CanonicalUser') + assert_equal 'CanonicalUser', grantee.type + end + + def test_type_representation + grantee = ACL::Grantee.new('uri' => 'http://acs.amazonaws.com/groups/global/AllUsers') + + assert_equal 'AllUsers Group', grantee.type_representation + grantee.group = 'AuthenticatedUsers' + assert_equal 'AuthenticatedUsers Group', grantee.type_representation + grantee.email_address = 'marcel@vernix.org' + assert_equal 'marcel@vernix.org', grantee.type_representation + grantee.display_name = 'noradio' + grantee.id = '123456789' + assert_equal 'noradio', grantee.type_representation + end +end + +class ACLOptionProcessorTest < Test::Unit::TestCase + def test_empty_options + options = {} + assert_nothing_raised do + process! options + end + assert_equal({}, options) + end + + def test_invalid_access_level + options = {:access => :foo} + assert_raises(InvalidAccessControlLevel) do + process! 
options + end + end + + def test_valid_access_level_is_normalized + valid_access_levels = [ + {:access => :private}, + {'access' => 'private'}, + {:access => 'private'}, + {'access' => :private}, + {'x-amz-acl' => 'private'}, + {:x_amz_acl => :private}, + {:x_amz_acl => 'private'}, + {'x_amz_acl' => :private} + ] + + valid_access_levels.each do |options| + assert_nothing_raised do + process! options + end + assert_equal 'private', acl(options) + end + + valid_hyphenated_access_levels = [ + {:access => :public_read}, + {'access' => 'public_read'}, + {'access' => 'public-read'}, + {:access => 'public_read'}, + {:access => 'public-read'}, + {'access' => :public_read}, + + {'x-amz-acl' => 'public_read'}, + {:x_amz_acl => :public_read}, + {:x_amz_acl => 'public_read'}, + {:x_amz_acl => 'public-read'}, + {'x_amz_acl' => :public_read} + ] + + valid_hyphenated_access_levels.each do |options| + assert_nothing_raised do + process! options + end + assert_equal 'public-read', acl(options) + end + end + + private + def process!(options) + ACL::OptionProcessor.process!(options) + end + + def acl(options) + options['x-amz-acl'] + end +end diff --git a/test/authentication_test.rb b/test/authentication_test.rb new file mode 100644 index 0000000..bc16061 --- /dev/null +++ b/test/authentication_test.rb @@ -0,0 +1,96 @@ +require File.dirname(__FILE__) + '/test_helper' + +class HeaderAuthenticationTest < Test::Unit::TestCase + def test_encoded_canonical + signature = Authentication::Signature.new(request, key_id, secret) + assert_equal AmazonDocExampleData::Example1.canonical_string, signature.send(:canonical_string) + assert_equal AmazonDocExampleData::Example1.signature, signature.send(:encoded_canonical) + end + + def test_authorization_header + header = Authentication::Header.new(request, key_id, secret) + assert_equal AmazonDocExampleData::Example1.canonical_string, header.send(:canonical_string) + assert_equal AmazonDocExampleData::Example1.authorization_header, header + end + + private + def request; AmazonDocExampleData::Example1.request end + def key_id ; AmazonDocExampleData::Example1.access_key_id end + def secret ; AmazonDocExampleData::Example1.secret_access_key end +end + +class QueryStringAuthenticationTest < Test::Unit::TestCase + def test_query_string + query_string = Authentication::QueryString.new(request, key_id, secret, :expires_in => 60) + assert_equal AmazonDocExampleData::Example3.canonical_string, query_string.send(:canonical_string) + assert_equal AmazonDocExampleData::Example3.query_string, query_string + end + + def test_query_string_with_explicit_expiry + query_string = Authentication::QueryString.new(request, key_id, secret, :expires => expires) + assert_equal expires, query_string.send(:canonical_string).instance_variable_get(:@options)[:expires] + assert_equal AmazonDocExampleData::Example3.query_string, query_string + end + + private + def request; AmazonDocExampleData::Example3.request end + def key_id ; AmazonDocExampleData::Example3.access_key_id end + def secret ; AmazonDocExampleData::Example3.secret_access_key end + def expires; AmazonDocExampleData::Example3.expires end +end + +class CanonicalStringTest < Test::Unit::TestCase + def setup + @request = Net::HTTP::Post.new('/test') + @canonical_string = Authentication::CanonicalString.new(@request) + end + + def test_path_does_not_include_query_string + request = Net::HTTP::Get.new('/test/query/string?foo=bar&baz=quux') + assert_equal '/test/query/string', Authentication::CanonicalString.new(request).send(:path) + + # Make 
sure things still work when there is *no* query string + request = Net::HTTP::Get.new('/') + assert_equal '/', Authentication::CanonicalString.new(request).send(:path) + request = Net::HTTP::Get.new('/foo/bar') + assert_equal '/foo/bar', Authentication::CanonicalString.new(request).send(:path) + end + + def test_path_includes_significant_query_strings + significant_query_strings = [ + ['/test/query/string?acl', '/test/query/string?acl'], + ['/test/query/string?acl&foo=bar', '/test/query/string?acl'], + ['/test/query/string?foo=bar&acl', '/test/query/string?acl'], + ['/test/query/string?acl=foo', '/test/query/string?acl'], + ['/test/query/string?torrent=foo', '/test/query/string?torrent'], + ['/test/query/string?logging=foo', '/test/query/string?logging'], + ['/test/query/string?bar=baz&acl=foo', '/test/query/string?acl'] + ] + + significant_query_strings.each do |uncleaned_path, expected_cleaned_path| + assert_equal expected_cleaned_path, Authentication::CanonicalString.new(Net::HTTP::Get.new(uncleaned_path)).send(:path) + end + end + + def test_default_headers_set + Authentication::CanonicalString.default_headers.each do |header| + assert @canonical_string.headers.include?(header) + end + end + + def test_interesting_headers_are_copied_over + an_interesting_header = 'content-md5' + string_without_interesting_header = Authentication::CanonicalString.new(@request) + assert string_without_interesting_header.headers[an_interesting_header].empty? + + # Add an interesting header + @request[an_interesting_header] = 'foo' + string_with_interesting_header = Authentication::CanonicalString.new(@request) + assert_equal 'foo', string_with_interesting_header.headers[an_interesting_header] + end + + def test_canonical_string + request = AmazonDocExampleData::Example1.request + assert_equal AmazonDocExampleData::Example1.canonical_string, Authentication::CanonicalString.new(request) + end +end \ No newline at end of file diff --git a/test/base_test.rb b/test/base_test.rb new file mode 100644 index 0000000..2dc2959 --- /dev/null +++ b/test/base_test.rb @@ -0,0 +1,143 @@ +require File.dirname(__FILE__) + '/test_helper' + +class BaseTest < Test::Unit::TestCase + def test_connection_established + assert_raises(NoConnectionEstablished) do + Base.connection + end + + Base.establish_connection!(:access_key_id => '123', :secret_access_key => 'abc') + assert_kind_of Connection, Base.connection + + instance = Base.new + assert_equal instance.send(:connection), Base.connection + assert_equal instance.send(:http), Base.connection.http + end + + def test_respond_with + assert_equal Base::Response, Base.send(:response_class) + Base.send(:respond_with, Bucket::Response) do + assert_equal Bucket::Response, Base.send(:response_class) + end + assert_equal Base::Response, Base.send(:response_class) + end + + def test_request_tries_again_when_encountering_an_internal_error + Bucket.in_test_mode do + Bucket.request_returns [ + # First request is an internal error + {:body => Fixtures::Errors.internal_error, :code => 500, :error => true}, + # Second request is a success + {:body => Fixtures::Buckets.empty_bucket, :code => 200} + ] + bucket = nil # Block scope hack + assert_nothing_raised do + bucket = Bucket.find('marcel') + end + # Don't call objects 'cause we don't want to make another request + assert bucket.object_cache.empty? 
+ end + end + + def test_request_tries_up_to_three_times + Bucket.in_test_mode do + Bucket.request_returns [ + # First request is an internal error + {:body => Fixtures::Errors.internal_error, :code => 500, :error => true}, + # Second request is also an internal error + {:body => Fixtures::Errors.internal_error, :code => 500, :error => true}, + # Ditto third + {:body => Fixtures::Errors.internal_error, :code => 500, :error => true}, + # Fourth works + {:body => Fixtures::Buckets.empty_bucket, :code => 200} + ] + bucket = nil # Block scope hack + assert_nothing_raised do + bucket = Bucket.find('marcel') + end + # Don't call objects 'cause we don't want to make another request + assert bucket.object_cache.empty? + end + end + + def test_request_tries_again_three_times_and_gives_up + Bucket.in_test_mode do + Bucket.request_returns [ + # First request is an internal error + {:body => Fixtures::Errors.internal_error, :code => 500, :error => true}, + # Second request is also an internal error + {:body => Fixtures::Errors.internal_error, :code => 500, :error => true}, + # Ditto third + {:body => Fixtures::Errors.internal_error, :code => 500, :error => true}, + # Ditto fourth + {:body => Fixtures::Errors.internal_error, :code => 500, :error => true}, + ] + assert_raises(InternalError) do + Bucket.find('marcel') + end + end + end +end + +class MultiConnectionsTest < Test::Unit::TestCase + class ClassToTestSettingCurrentBucket < Base + set_current_bucket_to 'foo' + end + + def setup + Base.send(:connections).clear + end + alias_method :teardown, :setup + + def test_default_connection_options_are_used_for_subsequent_connections + assert !Base.connected? + + assert_raises(MissingAccessKey) do + Base.establish_connection! + end + + assert !Base.connected? + + assert_raises(NoConnectionEstablished) do + Base.connection + end + + assert_nothing_raised do + Base.establish_connection!(:access_key_id => '123', :secret_access_key => 'abc') + end + + assert Base.connected? + + assert_nothing_raised do + Base.connection + end + + # All subclasses are currently using the default connection + assert Base.connection == Bucket.connection + + # No need to pass in the required options. 
The default connection will supply them + assert_nothing_raised do + Bucket.establish_connection!(:server => 'foo.s3.amazonaws.com') + end + + assert Base.connection != Bucket.connection + assert_equal '123', Bucket.connection.access_key_id + assert_equal 'foo', Bucket.connection.subdomain + end + + def test_current_bucket + Base.establish_connection!(:access_key_id => '123', :secret_access_key => 'abc') + assert_raises(CurrentBucketNotSpecified) do + Base.current_bucket + end + + S3Object.establish_connection!(:server => 'foo-bucket.s3.amazonaws.com') + assert_nothing_raised do + assert_equal 'foo-bucket', S3Object.current_bucket + end + end + + def test_setting_the_current_bucket + assert_equal 'foo', ClassToTestSettingCurrentBucket.current_bucket + end +end diff --git a/test/bucket_test.rb b/test/bucket_test.rb new file mode 100644 index 0000000..039e32d --- /dev/null +++ b/test/bucket_test.rb @@ -0,0 +1,48 @@ +require File.dirname(__FILE__) + '/test_helper' + +class BucketTest < Test::Unit::TestCase + def test_bucket_name_validation + valid_names = %w(123 joe step-one step_two step3 step_4 step-5 step.six) + invalid_names = ['12', 'jo', 'kevin spacey', 'larry@wall', '', 'a' * 256] + validate_name = Proc.new {|name| Bucket.send(:validate_name!, name)} + valid_names.each do |valid_name| + assert_nothing_raised { validate_name[valid_name] } + end + + invalid_names.each do |invalid_name| + assert_raises(InvalidBucketName) { validate_name[invalid_name] } + end + end + + def test_empty_bucket + Bucket.request_always_returns :body => Fixtures::Buckets.empty_bucket, :code => 200 do + bucket = Bucket.find('marcel_molina') + assert bucket.empty? + end + end + + def test_bucket_with_one_file + Bucket.request_always_returns :body => Fixtures::Buckets.bucket_with_one_key, :code => 200 do + bucket = Bucket.find('marcel_molina') + assert !bucket.empty? + assert_equal 1, bucket.size + assert_equal %w(tongue_overload.jpg), bucket.objects.map {|object| object.key} + assert bucket['tongue_overload.jpg'] + end + end + + def test_bucket_with_more_than_one_file + Bucket.request_always_returns :body => Fixtures::Buckets.bucket_with_more_than_one_key, :code => 200 do + bucket = Bucket.find('marcel_molina') + assert !bucket.empty? + assert_equal 2, bucket.size + assert_equal %w(beluga_baby.jpg tongue_overload.jpg), bucket.objects.map {|object| object.key}.sort + assert bucket['tongue_overload.jpg'] + end + end + + def test_bucket_path + assert_equal '/bucket_name?max-keys=2', Bucket.send(:path, 'bucket_name', :max_keys => 2) + assert_equal '/bucket_name', Bucket.send(:path, 'bucket_name', {}) + end +end \ No newline at end of file diff --git a/test/connection_test.rb b/test/connection_test.rb new file mode 100644 index 0000000..c0ac516 --- /dev/null +++ b/test/connection_test.rb @@ -0,0 +1,190 @@ +require File.dirname(__FILE__) + '/test_helper' + +class ConnectionTest < Test::Unit::TestCase + def setup + @keys = {:access_key_id => '123', :secret_access_key => 'abc'} + end + + def test_creating_a_connection + connection = Connection.new(@keys) + assert_kind_of Net::HTTP, connection.http + end + + def test_use_ssl_option_is_set_in_connection + connection = Connection.new(@keys.merge(:use_ssl => true)) + assert connection.http.use_ssl? + end + + def test_setting_port_to_443_implies_use_ssl + connection = Connection.new(@keys.merge(:port => 443)) + assert connection.http.use_ssl? 
+ end + + def test_protocol + connection = Connection.new(@keys) + assert_equal 'http://', connection.protocol + connection = Connection.new(@keys.merge(:use_ssl => true)) + assert_equal 'https://', connection.protocol + end + + def test_connection_is_persistent_by_default + connection = Connection.new(@keys) + assert connection.persistent? + + connection = Connection.new(@keys.merge(:persistent => false)) + assert !connection.persistent? + end + + def test_server_and_port_are_passed_onto_connection + connection = Connection.new(@keys) + options = connection.instance_variable_get('@options') + assert_equal connection.http.address, options[:server] + assert_equal connection.http.port, options[:port] + end + + def test_not_including_required_access_keys_raises + assert_raises(MissingAccessKey) do + Connection.new + end + + assert_raises(MissingAccessKey) do + Connection.new(:access_key_id => '123') + end + + assert_nothing_raised do + Connection.new(@keys) + end + end + + def test_access_keys_extracted + connection = Connection.new(@keys) + assert_equal '123', connection.access_key_id + assert_equal 'abc', connection.secret_access_key + end + + def test_request_method_class_lookup + c = Connection.new(@keys) + expectations = { + :get => Net::HTTP::Get, :post => Net::HTTP::Post, + :put => Net::HTTP::Put, :delete => Net::HTTP::Delete, + :head => Net::HTTP::Head + } + + expectations.each do |verb, klass| + assert_equal klass, c.send(:request_method, verb) + end + end + + def test_url_for_uses_default_protocol_server_and_port + connection = Connection.new(:access_key_id => '123', :secret_access_key => 'abc', :port => 80) + assert_match %r(^http://s3\.amazonaws\.com/foo\?), connection.url_for('/foo') + + connection = Connection.new(:access_key_id => '123', :secret_access_key => 'abc', :use_ssl => true, :port => 443) + assert_match %r(^https://s3\.amazonaws\.com/foo\?), connection.url_for('/foo') + end + + def test_url_for_remembers_custom_protocol_server_and_port + connection = Connection.new(:access_key_id => '123', :secret_access_key => 'abc', :server => 'example.org', :port => 555, :use_ssl => true) + assert_match %r(^https://example\.org:555/foo\?), connection.url_for('/foo') + end + + def test_url_for_with_and_without_authenticated_urls + connection = Connection.new(:access_key_id => '123', :secret_access_key => 'abc', :server => 'example.org') + authenticated = lambda {|url| url['?AWSAccessKeyId']} + assert authenticated[connection.url_for('/foo')] + assert authenticated[connection.url_for('/foo', :authenticated => true)] + assert !authenticated[connection.url_for('/foo', :authenticated => false)] + end + + def test_connecting_through_a_proxy + connection = nil + assert_nothing_raised do + connection = Connection.new(@keys.merge(:proxy => sample_proxy_settings)) + end + assert connection.http.proxy? 
+ end +end + +class ConnectionOptionsTest < Test::Unit::TestCase + + def setup + @options = generate_options(:server => 'example.org', :port => 555) + @default_options = generate_options + end + + def test_server_extracted + assert_key_transfered(:server, 'example.org', @options) + end + + def test_port_extracted + assert_key_transfered(:port, 555, @options) + end + + def test_server_defaults_to_default_host + assert_equal DEFAULT_HOST, @default_options[:server] + end + + def test_port_defaults_to_80_if_use_ssl_is_false + assert_equal 80, @default_options[:port] + end + + def test_port_is_set_to_443_if_use_ssl_is_true + options = generate_options(:use_ssl => true) + assert_equal 443, options[:port] + end + + def test_explicit_port_trumps_use_ssl + options = generate_options(:port => 555, :use_ssl => true) + assert_equal 555, options[:port] + end + + def test_invalid_options_raise + assert_raises(InvalidConnectionOption) do + generate_options(:host => 'campfire.s3.amazonaws.com') + end + end + + def test_not_specifying_all_required_proxy_settings_raises + assert_raises(ArgumentError) do + generate_options(:proxy => {}) + end + end + + def test_not_specifying_proxy_option_at_all_does_not_raise + assert_nothing_raised do + generate_options + end + end + + def test_specifying_all_required_proxy_settings + assert_nothing_raised do + generate_options(:proxy => sample_proxy_settings) + end + end + + def test_only_host_setting_is_required + assert_nothing_raised do + generate_options(:proxy => {:host => 'http://google.com'}) + end + end + + def test_proxy_settings_are_extracted + options = generate_options(:proxy => sample_proxy_settings) + assert_equal sample_proxy_settings.values.map {|value| value.to_s}.sort, options.proxy_settings.map {|value| value.to_s}.sort + end + + def test_recognizing_that_the_settings_want_to_connect_through_a_proxy + options = generate_options(:proxy => sample_proxy_settings) + assert options.connecting_through_proxy? 
+ end + + private + def assert_key_transfered(key, value, options) + assert_equal value, options[key] + assert !options.instance_variable_get('@options').has_key?(key) + end + + def generate_options(options = {}) + Connection::Options.new(options) + end +end diff --git a/test/error_test.rb b/test/error_test.rb new file mode 100644 index 0000000..7394e75 --- /dev/null +++ b/test/error_test.rb @@ -0,0 +1,75 @@ +require File.dirname(__FILE__) + '/test_helper' + +class ErrorTest < Test::Unit::TestCase + def setup + @container = AWS::S3 + @error = Error.new(Parsing::XmlParser.new(Fixtures::Errors.access_denied)) + end + + def teardown + @container.send(:remove_const, :NotImplemented) if @container.const_defined?(:NotImplemented) + end + + def test_error_class_is_automatically_generated + assert !@container.const_defined?('NotImplemented') + error = Error.new(Parsing::XmlParser.new(Fixtures::Errors.not_implemented)) + assert @container.const_defined?('NotImplemented') + end + + def test_error_contains_attributes + assert_equal 'Access Denied', @error.message + end + + def test_error_is_raisable_as_exception + assert_raises(@container::AccessDenied) do + @error.raise + end + end + + def test_error_message_is_passed_along_to_exception + @error.raise + rescue @container::AccessDenied => e + assert_equal 'Access Denied', e.message + end + + def test_response_is_passed_along_to_exception + response = Error::Response.new(FakeResponse.new(:code => 409, :body => Fixtures::Errors.access_denied)) + response.error.raise + rescue @container::ResponseError => e + assert e.response + assert_kind_of Error::Response, e.response + assert_equal response.error, e.response.error + end + + def test_exception_class_clash + assert !@container.const_defined?(:NotImplemented) + # Create a class that does not inherit from exception that has the same name as the class + # the Error instance is about to attempt to find or create + @container.const_set(:NotImplemented, Class.new) + assert @container.const_defined?(:NotImplemented) + + assert_raises(ExceptionClassClash) do + Error.new(Parsing::XmlParser.new(Fixtures::Errors.not_implemented)) + end + end + + def test_error_response_handles_attributes_with_no_value + Bucket.in_test_mode do + Bucket.request_returns :body => Fixtures::Errors.error_with_no_message, :code => 500 + + begin + Bucket.create('foo', 'invalid-argument' => 'bad juju') + rescue ResponseError => error + end + + assert_nothing_raised do + error.response.error.message + end + assert_nil error.response.error.message + + assert_raises(NoMethodError) do + error.response.error.non_existant_method + end + end + end +end \ No newline at end of file diff --git a/test/extensions_test.rb b/test/extensions_test.rb new file mode 100644 index 0000000..677f7c5 --- /dev/null +++ b/test/extensions_test.rb @@ -0,0 +1,331 @@ +require File.dirname(__FILE__) + '/test_helper' + +class HashExtensionsTest < Test::Unit::TestCase + def test_to_query_string + # Because hashes aren't ordered, I'm mostly testing against hashes with just one key + symbol_keys = {:one => 1} + string_keys = {'one' => 1} + expected = '?one=1' + [symbol_keys, string_keys].each do |hash| + assert_equal expected, hash.to_query_string + end + end + + def test_empty_hash_returns_no_query_string + assert_equal '', {}.to_query_string + end + + def test_include_question_mark + hash = {:one => 1} + assert_equal '?one=1', hash.to_query_string + assert_equal 'one=1', hash.to_query_string(false) + end + + def test_elements_joined_by_ampersand + hash = {:one => 1, 
:two => 2} + qs = hash.to_query_string + assert qs['one=1&two=2'] || qs['two=2&one=1'] + end + + def test_normalized_options + expectations = [ + [{:foo_bar => 1}, {'foo-bar' => '1'}], + [{'foo_bar' => 1}, {'foo-bar' => '1'}], + [{'foo-bar' => 1}, {'foo-bar' => '1'}], + [{}, {}] + ] + + expectations.each do |(before, after)| + assert_equal after, before.to_normalized_options + end + end +end + +class StringExtensionsTest < Test::Unit::TestCase + def test_previous + expectations = {'abc' => 'abb', '123' => '122', '1' => '0'} + expectations.each do |before, after| + assert_equal after, before.previous + end + end + + def test_to_header + transformations = { + 'foo' => 'foo', + :foo => 'foo', + 'foo-bar' => 'foo-bar', + 'foo_bar' => 'foo-bar', + :foo_bar => 'foo-bar', + 'Foo-Bar' => 'foo-bar', + 'Foo_Bar' => 'foo-bar' + } + + transformations.each do |before, after| + assert_equal after, before.to_header + end + end + + def test_utf8? + assert !"318597/620065/GTL_75\24300_A600_A610.zip".utf8? + assert "318597/620065/GTL_75£00_A600_A610.zip".utf8? + end + + def test_remove_extended + assert "318597/620065/GTL_75\24300_A600_A610.zip".remove_extended.utf8? + assert "318597/620065/GTL_75£00_A600_A610.zip".remove_extended.utf8? + end +end + +class CoercibleStringTest < Test::Unit::TestCase + + def test_coerce + coercions = [ + ['1', 1], + ['false', false], + ['true', true], + ['2006-10-29T23:14:47.000Z', Time.parse('2006-10-29T23:14:47.000Z')], + ['Hello!', 'Hello!'], + ['false23', 'false23'], + ['03 1-2-3-Apple-Tree.mp3', '03 1-2-3-Apple-Tree.mp3'], + ['0815', '0815'] # This number isn't coerced because the leading zero would be lost + ] + + coercions.each do |before, after| + assert_nothing_raised do + assert_equal after, CoercibleString.coerce(before) + end + end + end +end + +class KerneltExtensionsTest < Test::Unit::TestCase + class Foo + def foo + __method__ + end + + def bar + foo + end + + def baz + bar + end + end + + class Bar + def foo + calling_method + end + + def bar + calling_method + end + + def calling_method + __method__(1) + end + end + + def test___method___works_regardless_of_nesting + f = Foo.new + [:foo, :bar, :baz].each do |method| + assert_equal 'foo', f.send(method) + end + end + + def test___method___depth + b = Bar.new + assert_equal 'foo', b.foo + assert_equal 'bar', b.bar + end +end + +class ModuleExtensionsTest < Test::Unit::TestCase + class Foo + def foo(reload = false) + memoize(reload) do + Time.now + end + end + + def bar(reload = false) + memoize(reload, :baz) do + Time.now + end + end + + def quux + Time.now + end + memoized :quux + end + + def setup + @instance = Foo.new + end + + def test_memoize + assert !@instance.instance_variables.include?('@foo') + cached_result = @instance.foo + assert_equal cached_result, @instance.foo + assert @instance.instance_variables.include?('@foo') + assert_equal cached_result, @instance.send(:instance_variable_get, :@foo) + assert_not_equal cached_result, new_cache = @instance.foo(:reload) + assert_equal new_cache, @instance.foo + assert_equal new_cache, @instance.send(:instance_variable_get, :@foo) + end + + def test_customizing_memoize_storage + assert !@instance.instance_variables.include?('@bar') + assert !@instance.instance_variables.include?('@baz') + cached_result = @instance.bar + assert !@instance.instance_variables.include?('@bar') + assert @instance.instance_variables.include?('@baz') + assert_equal cached_result, @instance.bar + assert_equal cached_result, @instance.send(:instance_variable_get, :@baz) + assert_nil 
@instance.send(:instance_variable_get, :@bar) + end + + def test_memoized + assert !@instance.instance_variables.include?('@quux') + cached_result = @instance.quux + assert_equal cached_result, @instance.quux + assert @instance.instance_variables.include?('@quux') + assert_equal cached_result, @instance.send(:instance_variable_get, :@quux) + assert_not_equal cached_result, new_cache = @instance.quux(:reload) + assert_equal new_cache, @instance.quux + assert_equal new_cache, @instance.send(:instance_variable_get, :@quux) + end + + def test_constant_setting + some_module = Module.new + assert !some_module.const_defined?(:FOO) + assert_nothing_raised do + some_module.constant :FOO, 'bar' + end + + assert some_module.const_defined?(:FOO) + assert_nothing_raised do + some_module::FOO + some_module.foo + end + assert_equal 'bar', some_module::FOO + assert_equal 'bar', some_module.foo + + assert_nothing_raised do + some_module.constant :FOO, 'baz' + end + + assert_equal 'bar', some_module::FOO + assert_equal 'bar', some_module.foo + end +end + +class AttributeProxyTest < Test::Unit::TestCase + class BlindProxyUsingDefaultAttributesHash + include SelectiveAttributeProxy + proxy_to :exlusively => false + end + + class BlindProxyUsingCustomAttributeHash + include SelectiveAttributeProxy + proxy_to :settings + end + + class ProxyUsingPassedInAttributeHash + include SelectiveAttributeProxy + + def initialize(attributes = {}) + @attributes = attributes + end + end + + class RestrictedProxy + include SelectiveAttributeProxy + + private + def proxiable_attribute?(name) + %w(foo bar baz).include?(name) + end + end + + class NonExclusiveProxy + include SelectiveAttributeProxy + proxy_to :settings, :exclusively => false + end + + def test_using_all_defaults + b = BlindProxyUsingDefaultAttributesHash.new + assert_nothing_raised do + b.foo = 'bar' + end + + assert_nothing_raised do + b.foo + end + + assert_equal 'bar', b.foo + end + + def test_storage_is_autovivified + b = BlindProxyUsingDefaultAttributesHash.new + assert_nothing_raised do + b.send(:attributes)['foo'] = 'bar' + end + + assert_nothing_raised do + b.foo + end + + assert_equal 'bar', b.foo + end + + def test_limiting_which_attributes_are_proxiable + r = RestrictedProxy.new + assert_nothing_raised do + r.foo = 'bar' + end + + assert_nothing_raised do + r.foo + end + + assert_equal 'bar', r.foo + + assert_raises(NoMethodError) do + r.quux = 'foo' + end + + assert_raises(NoMethodError) do + r.quux + end + end + + def test_proxying_is_exclusive_by_default + p = ProxyUsingPassedInAttributeHash.new('foo' => 'bar') + assert_nothing_raised do + p.foo + p.foo = 'baz' + end + + assert_equal 'baz', p.foo + + assert_raises(NoMethodError) do + p.quux + end + end + + def test_setting_the_proxy_as_non_exclusive + n = NonExclusiveProxy.new + assert_nothing_raised do + n.foo = 'baz' + end + + assert_nothing_raised do + n.foo + end + + assert_equal 'baz', n.foo + end +end \ No newline at end of file diff --git a/test/fixtures.rb b/test/fixtures.rb new file mode 100644 index 0000000..a103d98 --- /dev/null +++ b/test/fixtures.rb @@ -0,0 +1,89 @@ +require 'yaml' + +module AWS + module S3 + # When this file is loaded, for each fixture file, a module is created within the Fixtures module + # with the same name as the fixture file. For each fixture in that fixture file, a singleton method is + # added to the module with the name of the given fixture, returning the value of the fixture. 
+ # + # For example: + # + # A fixture in buckets.yml named empty_bucket_list with value hi! + # would be made available like so: + # + # Fixtures::Buckets.empty_bucket_list + # => "hi!" + # + # Alternatively you can treat the fixture module like a hash + # + # Fixtures::Buckets[:empty_bucket_list] + # => "hi!" + # + # You can find out all available fixtures by calling + # + # Fixtures.fixtures + # => ["Buckets"] + # + # And all the fixtures contained in a given fixture by calling + # + # Fixtures::Buckets.fixtures + # => ["bucket_list_with_more_than_one_bucket", "bucket_list_with_one_bucket", "empty_bucket_list"] + module Fixtures + class << self + def create_fixtures + files.each do |file| + create_fixture_for(file) + end + end + + def create_fixture_for(file) + fixtures = YAML.load_file(path(file)) + fixture_module = Module.new + + fixtures.each do |name, value| + fixture_module.module_eval(<<-EVAL, __FILE__, __LINE__) + def #{name} + #{value.inspect} + end + module_function :#{name} + EVAL + end + + fixture_module.module_eval(<<-EVAL, __FILE__, __LINE__) + module_function + + def fixtures + #{fixtures.keys.sort.inspect} + end + + def [](name) + send(name) if fixtures.include?(name.to_s) + end + EVAL + + const_set(module_name(file), fixture_module) + end + + def fixtures + constants.sort + end + + private + + def files + Dir.glob(File.dirname(__FILE__) + '/fixtures/*.yml').map {|fixture| File.basename(fixture)} + end + + def module_name(file_name) + File.basename(file_name, '.*').capitalize + end + + def path(file_name) + File.join(File.dirname(__FILE__), 'fixtures', file_name) + end + end + + create_fixtures + end + end +end \ No newline at end of file diff --git a/test/fixtures/buckets.yml b/test/fixtures/buckets.yml new file mode 100644 index 0000000..37fae58 --- /dev/null +++ b/test/fixtures/buckets.yml @@ -0,0 +1,102 @@ +empty_bucket_list: > + + + ab00c3106e091f8fe23154c85678cda66628adb330bc00f02cf4a1c36d76bc48 + amazon + + + + + +bucket_list_with_one_bucket: > + + + ab00c3106e091f8fe23154c85678cda66628adb330bc00f02cf4a1c36d76bc48 + amazon + + + + marcel_molina + 2006-10-04T15:58:38.000Z + + + + + +bucket_list_with_more_than_one_bucket: > + + + ab00c3106e091f8fe23154c85678cda66628adb330bc00f02cf4a1c36d76bc48 + amazon + + + + marcel_molina + 2006-10-04T15:58:38.000Z + + + marcel_molina_jr + 2006-10-04T16:01:30.000Z + + + + +empty_bucket: > + + marcel_molina + + + 1000 + false + + +bucket_with_one_key: > + + marcel_molina + + + 1000 + false + + tongue_overload.jpg + 2006-10-05T02:42:22.000Z + "f21f7c4e8ea6e34b268887b07d6da745" + 60673 + + bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + mmolina@onramp.net + + STANDARD + + + +bucket_with_more_than_one_key: > + + marcel_molina + + + 1000 + false + + beluga_baby.jpg + 2006-10-05T02:55:10.000Z + "b2453d4a39a7387674a8c505112a2f0b" + 35807 + + bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + mmolina@onramp.net + + STANDARD + + + tongue_overload.jpg + 2006-10-05T02:42:22.000Z + "f21f7c4e8ea6e34b268887b07d6da745" + 60673 + + bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + mmolina@onramp.net + + STANDARD + + diff --git a/test/fixtures/errors.yml b/test/fixtures/errors.yml new file mode 100644 index 0000000..cdd2c74 --- /dev/null +++ b/test/fixtures/errors.yml @@ -0,0 +1,34 @@ +not_implemented: > + + NotImplemented + A header you provided implies functionality that is not implemented + D1D13A09AC92427F +
Host
+ oNZgzTTmWiovwGGwHXAzz+1vRmAJVAplS9TF7B0cuOGfEwoi7DYSTa/1Qhv90CfW +
+ +access_denied: > + + AccessDenied + Access Denied + F99F6D58B96C98E0 + XwCF7k3llrcEwtoHR7MusZ6ilCdF5DKDmwYpglvjKNjvwo24INCeXlEpo1M03Wxm + + +internal_error: > + + InternalError + Internal Error + F99F6D223B96C98E0 + XwCF7k3llrcEwtoHR7MusZ6ilCdF5DKDmwYpglvjKNjvwo24INCeXlEpo1M03Wxm + + +error_with_no_message: > + + InvalidArgument + + READ + 74A377B1C0FA2BCF + cP4rqsAEtHpN6Ckv08Hr3LXjLzx15/YgyoSqzs779vMR8MrAFSodxZp96wtuMQuI + x-amz-acl + \ No newline at end of file diff --git a/test/fixtures/headers.yml b/test/fixtures/headers.yml new file mode 100644 index 0000000..528e63b --- /dev/null +++ b/test/fixtures/headers.yml @@ -0,0 +1,3 @@ +headers_including_one_piece_of_metadata: + x-amz-meta-test: foo + content_type: text/plain \ No newline at end of file diff --git a/test/fixtures/logging.yml b/test/fixtures/logging.yml new file mode 100644 index 0000000..e997791 --- /dev/null +++ b/test/fixtures/logging.yml @@ -0,0 +1,15 @@ +logging_enabled: > + + + mylogs + access_log- + + + +logging_disabled: > + + + \ No newline at end of file diff --git a/test/fixtures/loglines.yml b/test/fixtures/loglines.yml new file mode 100644 index 0000000..d96c09f --- /dev/null +++ b/test/fixtures/loglines.yml @@ -0,0 +1,5 @@ +bucket_get: + "bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 marcel [14/Nov/2006:06:36:48 +0000] 67.165.183.125 bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 8B5297D428A05432 REST.GET.BUCKET - \"GET /marcel HTTP/1.1\" 200 - 4534 - 398 395 \"-\" \"-\"\n" + +browser_get: + "bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 marcel [25/Nov/2006:06:26:23 +0000] 67.165.183.125 65a011a29cdf8ec533ec3d1ccaae921c 41521D07CA012312 REST.GET.OBJECT kiss.jpg \"GET /marcel/kiss.jpg HTTP/1.1\" 200 - 67748 67748 259 104 \"-\" \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US; rv:1.8.1) Gecko/20061010 Firefox/2.0\"\n" \ No newline at end of file diff --git a/test/fixtures/logs.yml b/test/fixtures/logs.yml new file mode 100644 index 0000000..5f29552 --- /dev/null +++ b/test/fixtures/logs.yml @@ -0,0 +1,7 @@ +simple_log: + - "bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 marcel [14/Nov/2006:06:36:48 +0000] 67.165.183.125 bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 8B5297D428A05432 REST.GET.BUCKET - \"GET /marcel HTTP/1.1\" 200 - 4534 - 398 395 \"-\" \"-\"\n" + - "bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 marcel [14/Nov/2006:06:38:58 +0000] 67.165.183.125 bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 8F6F3C4027849420 REST.GET.BUCKET - \"GET /marcel HTTP/1.1\" 200 - 4534 - 458 456 \"-\" \"-\"\n" + +requests_from_a_browser: + - "bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 marcel [25/Nov/2006:06:26:23 +0000] 67.165.183.125 65a011a29cdf8ec533ec3d1ccaae921c 41521D07CA012312 REST.GET.OBJECT kiss.jpg \"GET /marcel/kiss.jpg HTTP/1.1\" 200 - 67748 67748 259 104 \"-\" \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US; rv:1.8.1) Gecko/20061010 Firefox/2.0\"\n" + - "bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 marcel [25/Nov/2006:06:26:27 +0000] 67.165.183.125 65a011a29cdf8ec533ec3d1ccaae921c 88629578AFDDD9B5 REST.GET.TORRENT kiss.jpg \"GET /marcel/kiss.jpg?torrent HTTP/1.1\" 200 - 215 - 379 - \"-\" \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US; rv:1.8.1) Gecko/20061010 Firefox/2.0\"\n" \ No newline at end of file diff --git a/test/fixtures/policies.yml b/test/fixtures/policies.yml new file mode 100644 index 0000000..b84c463 --- /dev/null +++ 
b/test/fixtures/policies.yml @@ -0,0 +1,16 @@ +policy_with_one_grant: > + + + bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + mmolina@onramp.net + + + + + bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1 + mmolina@onramp.net + + FULL_CONTROL + + + \ No newline at end of file diff --git a/test/logging_test.rb b/test/logging_test.rb new file mode 100644 index 0000000..c4c0259 --- /dev/null +++ b/test/logging_test.rb @@ -0,0 +1,89 @@ +require File.dirname(__FILE__) + '/test_helper' + +class LoggingStatusReadingTest < Test::Unit::TestCase + + def setup + @disabled = logging_status(:logging_disabled) + @enabled = logging_status(:logging_enabled) + @new_status = Logging::Status.new('target_bucket' => 'foo', 'target_prefix' => 'access-log-') + end + + def test_logging_enabled? + assert !@disabled.logging_enabled? + assert !@new_status.logging_enabled? + assert @enabled.logging_enabled? + end + + def test_passing_in_prefix_and_bucket + assert_equal 'foo', @new_status.target_bucket + assert_equal 'access-log-', @new_status.target_prefix + assert !@new_status.logging_enabled? + end + + private + def logging_status(fixture) + Logging::Status.new(Parsing::XmlParser.new(Fixtures::Logging[fixture.to_s])) + end +end + +class LoggingStatusWritingTest < LoggingStatusReadingTest + def setup + super + @disabled = Logging::Status.new(Parsing::XmlParser.new(@disabled.to_xml)) + @enabled = Logging::Status.new(Parsing::XmlParser.new(@enabled.to_xml)) + end +end + +class LogTest < Test::Unit::TestCase + def test_value_converted_to_log_lines + log_object = S3Object.new + log_object.value = Fixtures::Logs.simple_log.join + log = Logging::Log.new(log_object) + assert_nothing_raised do + log.lines + end + + assert_equal 2, log.lines.size + assert_kind_of Logging::Log::Line, log.lines.first + assert_equal 'marcel', log.lines.first.bucket + end +end + +class LogLineTest < Test::Unit::TestCase + def setup + @line = Logging::Log::Line.new(Fixtures::Loglines.bucket_get) + end + + def test_field_accessors + expected_results = { + :owner => Owner.new('id' => 'bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1'), + :bucket => 'marcel', + :time => Time.parse('11/14/2006 06:36:48 +0000'), + :remote_ip => '67.165.183.125', + :request_id => '8B5297D428A05432', + :requestor => Owner.new('id' => 'bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1'), + :operation => 'REST.GET.BUCKET', + :key => nil, + :request_uri => 'GET /marcel HTTP/1.1', + :http_status => 200, + :error_code => nil, + :bytes_sent => 4534, + :object_size => nil, + :total_time => 398, + :turn_around_time => 395, + :referrer => nil, + :user_agent => nil + } + + expected_results.each do |field, expected| + assert_equal expected, @line.send(field) + end + + assert_equal expected_results, @line.attributes + end + + def test_user_agent + line = Logging::Log::Line.new(Fixtures::Loglines.browser_get) + assert_equal 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US; rv:1.8.1) Gecko/20061010 Firefox/2.0', line.user_agent + end +end \ No newline at end of file diff --git a/test/mocks/base.rb b/test/mocks/base.rb new file mode 100644 index 0000000..d467a7a --- /dev/null +++ b/test/mocks/base.rb @@ -0,0 +1,89 @@ +require_library_or_gem 'flexmock' + +module AWS + module S3 + class FakeResponse < String + attr_reader :code, :body, :headers + def initialize(options = {}) + @code = options.delete(:code) || 200 + @body = options.delete(:body) || '' + @headers = {'content-type' => 
'application/xml'}.merge(options.delete(:headers) || {}) + super(@body) + end + + # For ErrorResponse + def response + self + end + + def [](header) + headers[header] + end + + def each(&block) + headers.each(&block) + end + alias_method :each_header, :each + end + + class Base + class << self + @@responses = [] + @@in_test_mode = false + @@catch_all_response = nil + + def in_test_mode=(boolean) + @@in_test_mode = boolean + end + + def responses + @@responses + end + + def catch_all_response + @@catch_all_response + end + + def reset! + responses.clear + end + + def request_returns(response_data) + responses.concat [response_data].flatten.map {|data| FakeResponse.new(data)} + end + + def request_always_returns(response_data, &block) + in_test_mode do + @@catch_all_response = FakeResponse.new(response_data) + yield + @@catch_all_response = nil + end + end + + def in_test_mode(&block) + self.in_test_mode = true + yield + ensure + self.in_test_mode = false + end + + alias_method :old_connection, :connection + def connection + if @@in_test_mode + @mock_connection ||= + begin + mock_connection = FlexMock.new + mock_connection.mock_handle(:request) do + raise 'No responses left' unless response = catch_all_response || responses.shift + response + end + mock_connection + end + else + old_connection + end + end + end + end + end +end \ No newline at end of file diff --git a/test/object_test.rb b/test/object_test.rb new file mode 100644 index 0000000..acce149 --- /dev/null +++ b/test/object_test.rb @@ -0,0 +1,217 @@ +require File.dirname(__FILE__) + '/test_helper' + +class ObjectTest < Test::Unit::TestCase + def setup + bucket = Bucket.new(Parsing::XmlParser.new(Fixtures::Buckets.bucket_with_one_key)) + @object = bucket.objects.first + end + + def test_header_settings_reader_and_writer + S3Object.in_test_mode do + headers = {'content-type' => 'text/plain'} + S3Object.request_returns :headers => headers + + assert_nothing_raised do + @object.content_type + end + + assert_equal 'text/plain', @object.content_type + + assert_nothing_raised do + @object.content_type = 'image/jpg' + end + + assert_equal 'image/jpg', @object.content_type + + assert_raises(NoMethodError) do + @object.non_existant_header_setting + end + end + end + + def test_key_name_validation + assert_raises(InvalidKeyName) do + S3Object.create(nil, '', 'marcel') + end + + assert_raises(InvalidKeyName) do + huge_name = 'a' * 1500 + S3Object.create(huge_name, '', 'marcel') + end + end + + def test_content_type_inference + [ + ['foo.jpg', {}, 'image/jpeg'], + ['foo.txt', {}, 'text/plain'], + ['foo', {}, nil], + ['foo.asdf', {}, nil], + ['foo.jpg', {:content_type => nil}, nil], + ['foo', {:content_type => 'image/jpg'}, 'image/jpg'], + ['foo.jpg', {:content_type => 'image/png'}, 'image/png'], + ['foo.asdf', {:content_type => 'image/jpg'}, 'image/jpg'] + ].each do |key, options, content_type| + S3Object.send(:infer_content_type!, key, options) + assert_equal content_type, options[:content_type] + end + end + + def test_object_has_owner + assert_kind_of Owner, @object.owner + end + + def test_owner_attributes_are_accessible + owner = @object.owner + assert owner.id + assert owner.display_name + assert_equal 'bb2041a25975c3d4ce9775fe9e93e5b77a6a9fad97dc7e00686191f3790b13f1', owner.id + assert_equal 'mmolina@onramp.net', owner.display_name + end + + def test_only_valid_attributes_accessible + assert_raises(NoMethodError) do + @object.owner.foo + end + end + + def test_fetching_object_value_generates_value_object + S3Object.in_test_mode do + 
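+      # request_returns queues a canned FakeResponse (see test/mocks/base.rb); while in
+      # test mode the mocked connection hands it back for the next request, so no real
+      # network call is made here.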
S3Object.request_returns :body => 'hello!' + value = S3Object.value('foo', 'bar') + assert_kind_of S3Object::Value, value + assert_equal 'hello!', value + end + end + + def test_fetching_file_by_name_raises_when_heuristic_fails + S3Object.request_always_returns :body => Fixtures::Buckets.bucket_with_one_key do + assert_raises(NoSuchKey) do + S3Object.find('not_tongue_overload.jpg', 'marcel_molina') + end + + object = nil # Block scoping + assert_nothing_raised do + object = S3Object.find('tongue_overload.jpg', 'marcel_molina') + end + assert_kind_of S3Object, object + assert_equal 'tongue_overload.jpg', object.key + end + end + + def test_about + S3Object.in_test_mode do + headers = {'content-size' => '12345', 'date' => Time.now.httpdate, 'content-type' => 'application/xml'} + S3Object.request_returns :headers => headers + about = S3Object.about('foo', 'bar') + assert_kind_of S3Object::About, about + assert_equal headers, about + + S3Object.request_returns :code => 404 + assert_raises(NoSuchKey) do + S3Object.about('foo', 'bar') + end + end + end + + def test_exists? + S3Object.in_test_mode do + S3Object.request_returns :code => 404 + assert_equal false, S3Object.exists?('foo', 'bar') + + S3Object.request_returns :code => 200 + assert_equal true, S3Object.exists?('foo', 'bar') + end + end + + def test_s3object_equality + Bucket.in_test_mode do + Bucket.request_returns :body => Fixtures::Buckets.bucket_with_more_than_one_key + file1, file2 = Bucket.objects('does not matter') + assert file1 == file1 + assert file2 == file2 + assert !(file1 == file2) # /!\ Parens required /!\ + end + end + + def test_inspect + S3Object.in_test_mode do + S3Object.request_returns :body => Fixtures::Buckets.bucket_with_one_key + object = S3Object.find('tongue_overload.jpg', 'bucket does not matter') + assert object.path + assert_nothing_raised { object.inspect } + assert object.inspect[object.path] + end + end + + def test_etag + S3Object.in_test_mode do + S3Object.request_returns :body => Fixtures::Buckets.bucket_with_one_key + file = S3Object.find('tongue_overload.jpg', 'bucket does not matter') + assert file.etag + assert_equal 'f21f7c4e8ea6e34b268887b07d6da745', file.etag + end + end + + def test_fetching_information_about_an_object_that_does_not_exist_raises_no_such_key + S3Object.in_test_mode do + S3Object.request_returns :body => '', :code => 404 + assert_raises(NoSuchKey) do + S3Object.about('asdfasdfasdfas-this-does-not-exist', 'bucket does not matter') + end + end + end +end + +class MetadataTest < Test::Unit::TestCase + def setup + @metadata = S3Object::Metadata.new(Fixtures::Headers.headers_including_one_piece_of_metadata) + end + + def test_only_metadata_is_extracted + assert @metadata.to_headers.size == 1 + assert @metadata.to_headers['x-amz-meta-test'] + assert_equal 'foo', @metadata.to_headers['x-amz-meta-test'] + end + + def test_setting_new_metadata_normalizes_name + @metadata[:bar] = 'baz' + assert @metadata.to_headers.include?('x-amz-meta-bar') + @metadata['baz'] = 'quux' + assert @metadata.to_headers.include?('x-amz-meta-baz') + @metadata['x-amz-meta-quux'] = 'whatever' + assert @metadata.to_headers.include?('x-amz-meta-quux') + end + + def test_clobbering_existing_header + @metadata[:bar] = 'baz' + assert_equal 'baz', @metadata.to_headers['x-amz-meta-bar'] + @metadata[:bar] = 'quux' + assert_equal 'quux', @metadata.to_headers['x-amz-meta-bar'] + @metadata['bar'] = 'foo' + assert_equal 'foo', @metadata.to_headers['x-amz-meta-bar'] + @metadata['x-amz-meta-bar'] = 'bar' + assert_equal 'bar', 
@metadata.to_headers['x-amz-meta-bar'] + end + + def test_invalid_metadata + @metadata[:invalid_header] = ' ' * (S3Object::Metadata::SIZE_LIMIT + 1) + assert_raises InvalidMetadataValue do + @metadata.to_headers + end + end +end + +class ValueTest < Test::Unit::TestCase + def setup + @response = FakeResponse.new(:body => 'hello there') + @value = S3Object::Value.new(@response) + end + + def test_value_is_set_to_response_body + assert_equal @response.body, @value + end + + def test_response_is_accessible_from_value_object + assert_equal @response, @value.response + end +end \ No newline at end of file diff --git a/test/parsing_test.rb b/test/parsing_test.rb new file mode 100644 index 0000000..5410dbd --- /dev/null +++ b/test/parsing_test.rb @@ -0,0 +1,66 @@ +require File.dirname(__FILE__) + '/test_helper' + +class TypecastingTest < Test::Unit::TestCase + # Make it easier to call methods in tests + Parsing::Typecasting.public_instance_methods.each do |method| + Parsing::Typecasting.send(:module_function, method) + end + + def test_array_with_one_element_that_is_a_hash + value = [{'Available' => 'true'}] + assert_equal [{'available' => true}], Parsing::Typecasting.typecast(value) + end + + def test_hash_with_one_key_whose_value_is_an_array + value = { + 'Bucket' => + [ + {'Available' => 'true'} + ] + } + + expected = { + 'bucket' => + [ + {'available' => true} + ] + } + assert_equal expected, Parsing::Typecasting.typecast(value) + end + +end + +class XmlParserTest < Test::Unit::TestCase + def test_bucket_is_always_forced_to_be_an_array_unless_empty + one_bucket = Parsing::XmlParser.new(Fixtures::Buckets.bucket_list_with_one_bucket) + more_than_one = Parsing::XmlParser.new(Fixtures::Buckets.bucket_list_with_more_than_one_bucket) + + [one_bucket, more_than_one].each do |bucket_list| + assert_kind_of Array, bucket_list['buckets']['bucket'] + end + + no_buckets = Parsing::XmlParser.new(Fixtures::Buckets.empty_bucket_list) + assert no_buckets.has_key?('buckets') + assert_nil no_buckets['buckets'] + end + + def test_bucket_contents_are_forced_to_be_an_array_unless_empty + one_key = Parsing::XmlParser.new(Fixtures::Buckets.bucket_with_one_key) + more_than_one = Parsing::XmlParser.new(Fixtures::Buckets.bucket_with_more_than_one_key) + [one_key, more_than_one].each do |bucket_with_contents| + assert_kind_of Array, bucket_with_contents['contents'] + end + + no_keys = Parsing::XmlParser.new(Fixtures::Buckets.empty_bucket) + assert !no_keys.has_key?('contents') + end + + def test_policy_grants_are_always_an_array + policy = Parsing::XmlParser.new(Fixtures::Policies.policy_with_one_grant) + assert_kind_of Array, policy['access_control_list']['grant'] + end + + def test_empty_xml_response_is_not_parsed + assert_equal({}, Parsing::XmlParser.new('')) + end +end \ No newline at end of file diff --git a/test/remote/acl_test.rb b/test/remote/acl_test.rb new file mode 100644 index 0000000..8fa9bff --- /dev/null +++ b/test/remote/acl_test.rb @@ -0,0 +1,117 @@ +require File.dirname(__FILE__) + '/test_helper' + +class RemoteACLTest < Test::Unit::TestCase + + def setup + establish_real_connection + end + + def teardown + disconnect! 
+ end + + def test_acl + Bucket.create(TEST_BUCKET) # Wipe out the existing bucket's ACL + + bucket_policy = Bucket.acl(TEST_BUCKET) + assert_equal 1, bucket_policy.grants.size + assert !bucket_policy.grants.include?(:public_read_acp) + + bucket_policy.grants << ACL::Grant.grant(:public_read_acp) + + assert_nothing_raised do + Bucket.acl(TEST_BUCKET, bucket_policy) + end + + bucket = Bucket.find(TEST_BUCKET) + assert bucket.acl.grants.include?(:public_read_acp) + + bucket.acl.grants.pop # Get rid of the newly added grant + + assert !bucket.acl.grants.include?(:public_read_acp) + bucket.acl(bucket.acl) # Update its acl + assert Service.response.success? + + bucket_policy = Bucket.acl(TEST_BUCKET) + assert_equal 1, bucket_policy.grants.size + assert !bucket_policy.grants.include?(:public_read_acp) + + S3Object.store('testing-acls', 'the test data', TEST_BUCKET, :content_type => 'text/plain') + acl = S3Object.acl('testing-acls', TEST_BUCKET) + + # Confirm object has the default policy + + assert !acl.grants.empty? + assert_equal 1, acl.grants.size + grant = acl.grants.first + + assert_equal 'FULL_CONTROL', grant.permission + + grantee = grant.grantee + + assert acl.owner.id + assert acl.owner.display_name + assert grantee.id + assert grantee.display_name + + assert_equal acl.owner.id, grantee.id + assert_equal acl.owner.display_name, grantee.display_name + + assert_equal Owner.current, acl.owner + + + # Manually add read access to an Amazon customer by email address + + new_grant = ACL::Grant.new + new_grant.permission = 'READ' + new_grant_grantee = ACL::Grantee.new + new_grant_grantee.email_address = 'marcel@vernix.org' + new_grant.grantee = new_grant_grantee + acl.grants << new_grant + + assert_nothing_raised do + S3Object.acl('testing-acls', TEST_BUCKET, acl) + end + + # Confirm the acl was updated successfully + + assert Service.response.success? + + acl = S3Object.acl('testing-acls', TEST_BUCKET) + assert !acl.grants.empty? + assert_equal 2, acl.grants.size + new_grant = acl.grants.last + assert_equal 'READ', new_grant.permission + + # Confirm instance method has same result + + assert_equal acl.grants, S3Object.find('testing-acls', TEST_BUCKET).acl.grants + + # Get rid of the grant we just added + + acl.grants.pop + + # Confirm acl class method sees that the bucket option is being used to put a new acl + + assert_nothing_raised do + TestS3Object.acl('testing-acls', acl) + end + + assert Service.response.success? + + acl = TestS3Object.acl('testing-acls') + + # Confirm added grant was removed from the policy + + assert !acl.grants.empty? + assert_equal 1, acl.grants.size + grant = acl.grants.first + assert_equal 'FULL_CONTROL', grant.permission + + assert_nothing_raised do + S3Object.delete('testing-acls', TEST_BUCKET) + end + + assert Service.response.success? + end +end \ No newline at end of file diff --git a/test/remote/bittorrent_test.rb b/test/remote/bittorrent_test.rb new file mode 100644 index 0000000..6cd9ff0 --- /dev/null +++ b/test/remote/bittorrent_test.rb @@ -0,0 +1,45 @@ +require File.dirname(__FILE__) + '/test_helper' + +class RemoteBittorrentTest < Test::Unit::TestCase + def setup + establish_real_connection + end + + def teardown + disconnect! 
+ end + + def test_bittorrent + bt_test_key = 'testing-bittorrent' + S3Object.create(bt_test_key, 'foo', TEST_BUCKET) + + # Confirm we can fetch a bittorrent file for this object + + torrent_file = nil + assert_nothing_raised do + torrent_file = S3Object.torrent_for(bt_test_key, TEST_BUCKET) + end + assert torrent_file + assert torrent_file['tracker'] + + # Make object accessible to the public via a torrent + + policy = S3Object.acl(bt_test_key, TEST_BUCKET) + + assert !policy.grants.include?(:public_read) + + assert_nothing_raised do + S3Object.grant_torrent_access_to(bt_test_key, TEST_BUCKET) + end + + policy = S3Object.acl(bt_test_key, TEST_BUCKET) + + assert policy.grants.include?(:public_read) + + # Confirm instance method wraps class method + + assert_equal torrent_file, S3Object.find(bt_test_key, TEST_BUCKET).torrent + + S3Object.delete(bt_test_key, TEST_BUCKET) + end +end \ No newline at end of file diff --git a/test/remote/bucket_test.rb b/test/remote/bucket_test.rb new file mode 100644 index 0000000..8c84f78 --- /dev/null +++ b/test/remote/bucket_test.rb @@ -0,0 +1,146 @@ +require File.dirname(__FILE__) + '/test_helper' + +class RemoteBucketTest < Test::Unit::TestCase + + def setup + establish_real_connection + assert Bucket.find(TEST_BUCKET).delete_all + end + + def teardown + disconnect! + end + + def test_bucket + # Fetch the testing bucket + + bucket = nil + assert_nothing_raised do + bucket = Bucket.find(TEST_BUCKET) + end + + assert bucket + + # Confirm we can fetch the bucket implicitly + + bucket = nil + assert_nothing_raised do + bucket = TestBucket.find + end + + assert bucket + + # Confirm the bucket has the right name + + assert_equal TEST_BUCKET, bucket.name + + assert bucket.empty? + assert_equal 0, bucket.size + + # Add some files to the bucket + + assert_nothing_raised do + %w(a m z).each do |file_name| + S3Object.create(file_name, file_name, bucket.name, :content_type => 'text/plain') + end + end + + # Confirm that we can reload the objects + + assert_nothing_raised do + bucket.objects(:reload) + end + + assert !bucket.empty? + assert_equal 3, bucket.size + + assert_nothing_raised do + bucket.objects(:marker => 'm') + end + + assert_equal 1, bucket.size + assert bucket['z'] + + assert_equal 1, Bucket.find(TEST_BUCKET, :max_keys => 1).size + + assert_nothing_raised do + bucket.objects(:reload) + end + + assert_equal 3, bucket.size + + # Ensure the reloaded buckets have been repatriated + + assert_equal bucket, bucket.objects.first.bucket + + # Confirm that we can delete one of the objects and it will be removed + + object_to_be_deleted = bucket.objects.last + assert_nothing_raised do + object_to_be_deleted.delete + end + + assert !bucket.objects.include?(object_to_be_deleted) + + # Confirm that we can add an object + + object = bucket.new_object(:value => 'hello') + + assert_raises(NoKeySpecified) do + object.store + end + + object.key = 'abc' + assert_nothing_raised do + object.store + end + + assert bucket.objects.include?(object) + + # Confirm that the object is still there after reloading its objects + + assert_nothing_raised do + bucket.objects(:reload) + end + assert bucket.objects.include?(object) + + # Check that TestBucket has the same objects fetched implicitly + + assert_equal bucket.objects, TestBucket.objects + + # Empty out bucket + + assert_nothing_raised do + bucket.delete_all + end + + assert bucket.empty? + + bucket = nil + assert_nothing_raised do + bucket = Bucket.find(TEST_BUCKET) + end + + assert bucket.empty? 
+ end + + def test_bucket_name_is_switched_with_options_when_bucket_is_implicit_and_options_are_passed + Object.const_set(:ImplicitlyNamedBucket, Class.new(Bucket)) + ImplicitlyNamedBucket.current_bucket = TEST_BUCKET + assert ImplicitlyNamedBucket.objects.empty? + + %w(a b c).each {|key| S3Object.store(key, 'value does not matter', TEST_BUCKET)} + + assert_equal 3, ImplicitlyNamedBucket.objects.size + + objects = nil + assert_nothing_raised do + objects = ImplicitlyNamedBucket.objects(:max_keys => 1) + end + + assert objects + assert_equal 1, objects.size + ensure + %w(a b c).each {|key| S3Object.delete(key, TEST_BUCKET)} + end +end \ No newline at end of file diff --git a/test/remote/logging_test.rb b/test/remote/logging_test.rb new file mode 100644 index 0000000..1a4209c --- /dev/null +++ b/test/remote/logging_test.rb @@ -0,0 +1,82 @@ +require File.dirname(__FILE__) + '/test_helper' + +class RemoteLoggingTest < Test::Unit::TestCase + def setup + establish_real_connection + end + + def teardown + disconnect! + end + + def test_logging + Bucket.create(TEST_BUCKET) # Clear out any custom grants + + # Confirm that logging is not enabled on the test bucket + + assert !Bucket.logging_enabled_for?(TEST_BUCKET) + assert !Bucket.find(TEST_BUCKET).logging_enabled? + + assert_equal [], Bucket.logs_for(TEST_BUCKET) + + # Confirm the current bucket doesn't have logging grants + + policy = Bucket.acl(TEST_BUCKET) + assert !policy.grants.include?(:logging_read_acp) + assert !policy.grants.include?(:logging_write) + + # Confirm that we can enable logging + + assert_nothing_raised do + Bucket.enable_logging_for TEST_BUCKET + end + + # Confirm enabling logging worked + + assert Service.response.success? + + assert Bucket.logging_enabled_for?(TEST_BUCKET) + assert Bucket.find(TEST_BUCKET).logging_enabled? + + # Confirm the appropriate grants were added + + policy = Bucket.acl(TEST_BUCKET) + assert policy.grants.include?(:logging_read_acp) + assert policy.grants.include?(:logging_write) + + # Confirm logging status used defaults + + logging_status = Bucket.logging_status_for TEST_BUCKET + assert_equal TEST_BUCKET, logging_status.target_bucket + assert_equal 'log-', logging_status.target_prefix + + # Confirm we can update the logging status + + logging_status.target_prefix = 'access-log-' + + assert_nothing_raised do + Bucket.logging_status_for TEST_BUCKET, logging_status + end + + assert Service.response.success? + + logging_status = Bucket.logging_status_for TEST_BUCKET + assert_equal 'access-log-', logging_status.target_prefix + + # Confirm we can make a request for the bucket's logs + + assert_nothing_raised do + Bucket.logs_for TEST_BUCKET + end + + # Confirm we can disable logging + + assert_nothing_raised do + Bucket.disable_logging_for(TEST_BUCKET) + end + + assert Service.response.success? + + assert !Bucket.logging_enabled_for?(TEST_BUCKET) + end +end \ No newline at end of file diff --git a/test/remote/object_test.rb b/test/remote/object_test.rb new file mode 100644 index 0000000..b66e40b --- /dev/null +++ b/test/remote/object_test.rb @@ -0,0 +1,371 @@ +require File.dirname(__FILE__) + '/test_helper' + +class RemoteS3ObjectTest < Test::Unit::TestCase + def setup + establish_real_connection + end + + def teardown + disconnect! 
+ end + + def test_object + key = 'testing_s3objects' + value = 'testing' + content_type = 'text/plain' + unauthenticated_url = ['http:/', Base.connection.http.address, TEST_BUCKET, key].join('/') + + # Create an object + + response = nil + assert_nothing_raised do + response = S3Object.create(key, value, TEST_BUCKET, :access => :public_read, :content_type => content_type) + end + + # Check response + + assert response.success? + + # Extract the object's etag + + etag = nil + assert_nothing_raised do + etag = response.etag + end + + assert etag + + # Confirm we can't create an object unless the bucket is set + + assert_raises(NoBucketSpecified) do + object = S3Object.new + object.key = 'hello' + object.store + end + + # Fetch newly created object to show it was actually created + + object = nil + assert_nothing_raised do + object = S3Object.find(key, TEST_BUCKET) + end + + assert object + + # Confirm it has the right etag + + assert_equal etag, object.etag + + # Check if its owner is properly set + + assert_nothing_raised do + object.owner.display_name + end + + # Confirm we can get the object's key + + assert_equal key, object.key + + # Confirm its value was properly set + + assert_equal value, object.value + assert_equal value, S3Object.value(key, TEST_BUCKET) + streamed_value = '' + assert_nothing_raised do + S3Object.stream(key, TEST_BUCKET) do |segment| + streamed_value << segment + end + end + + assert_equal value, streamed_value + + # Change its value + + new_value = "" + assert_nothing_raised do + object.value = new_value + end + assert_equal new_value, object.value + + # Confirm content type was properly set + + assert_equal content_type, object.content_type + + # Change its content type + + new_content_type = 'text/javascript' + assert_nothing_raised do + object.content_type = new_content_type + end + + assert_equal new_content_type, object.content_type + + # Test that it is publicly readable + + response = fetch_object_at(unauthenticated_url) + assert (200..299).include?(response.code.to_i) + + # Confirm that it has no meta data + + assert object.metadata.empty? + + # Set some meta data + + metadata_key = :secret_sauce + metadata_value = "it's a secret" + object.metadata[metadata_key] = metadata_value + + # Persist all changes + + assert_nothing_raised do + object.store + end + + # Refetch the object + + key = object.key + object = nil + assert_nothing_raised do + object = S3Object.find(key, TEST_BUCKET) + end + + # Confirm all changes were persisted + + assert object + assert_equal key, object.key + + assert_equal new_content_type, object.content_type + + assert_equal new_value, object.value + assert_equal new_value, object.value(:reload) + + assert !object.metadata.empty? 
+ assert_equal metadata_value, object.metadata[metadata_key] + + # Change acl + + assert_nothing_raised do + S3Object.create(object.key, object.value, TEST_BUCKET, :access => :private, :content_type => object.content_type) + end + + # Confirm object is no longer publicly readable + + response = fetch_object_at(unauthenticated_url) + assert (400..499).include?(response.code.to_i) + + # Confirm object is accessible from its authenticated url + + response = fetch_object_at(object.url) + assert (200..299).include?(response.code.to_i) + + # Copy the object + + assert_nothing_raised do + object.copy('testing_s3objects-copy') + end + + # Confirm the object is identical + + copy = nil + assert_nothing_raised do + copy = S3Object.find('testing_s3objects-copy', TEST_BUCKET) + end + + assert copy + + assert_equal object.value, copy.value + assert_equal object.content_type, copy.content_type + + # Delete object + + assert_nothing_raised do + object.delete + end + + # Confirm we can rename objects + + renamed_to = copy.key + '-renamed' + renamed_value = copy.value + assert_nothing_raised do + S3Object.rename(copy.key, renamed_to, TEST_BUCKET) + end + + # Confirm renamed copy exists + + renamed = nil + assert_nothing_raised do + renamed = S3Object.find(renamed_to, TEST_BUCKET) + end + + assert renamed + assert_equal renamed_value, renamed.value + + # Confirm copy is deleted + + assert_raises(NoSuchKey) do + S3Object.find(copy.key, TEST_BUCKET) + end + + # Confirm that you can not store an object once it is deleted + + assert_raises(DeletedObject) do + object.store + end + + assert_raises(NoSuchKey) do + S3Object.find(key, TEST_BUCKET) + end + + # Confirm we can pass in an IO stream and have the uploading sent in chunks + + response = nil + test_file_key = File.basename(TEST_FILE) + assert_nothing_raised do + response = S3Object.store(test_file_key, open(TEST_FILE), TEST_BUCKET) + end + assert response.success? 
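+    # Confirm the streamed upload arrived intact: the object's reported content-length
+    # should equal the local file's size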
+ + assert_equal File.size(TEST_FILE), Integer(S3Object.about(test_file_key, TEST_BUCKET)['content-length']) + + result = nil + assert_nothing_raised do + result = S3Object.delete(test_file_key, TEST_BUCKET) + end + + assert result + end + + def test_content_type_inference + # Confirm appropriate content type is inferred when not specified + + content_type_objects = {'foo.jpg' => 'image/jpeg', 'no-extension-specified' => 'binary/octet-stream', 'foo.txt' => 'text/plain'} + content_type_objects.each_key do |key| + S3Object.store(key, 'fake data', TEST_BUCKET) # No content type explicitly set + end + + content_type_objects.each do |key, content_type| + assert_equal content_type, S3Object.about(key, TEST_BUCKET)['content-type'] + end + + # Confirm we can update the content type + + assert_nothing_raised do + object = S3Object.find('no-extension-specified', TEST_BUCKET) + object.content_type = 'application/pdf' + object.store + end + + assert_equal 'application/pdf', S3Object.about('no-extension-specified', TEST_BUCKET)['content-type'] + + ensure + # Get rid of objects we just created + content_type_objects.each_key {|key| S3Object.delete(key, TEST_BUCKET) } + end + + def test_body_can_be_more_than_just_string_or_io + require 'stringio' + key = 'testing-body-as-string-io' + io = StringIO.new('hello there') + S3Object.store(key, io, TEST_BUCKET) + assert_equal 'hello there', S3Object.value(key, TEST_BUCKET) + ensure + S3Object.delete(key, TEST_BUCKET) + end + + def test_fetching_information_about_an_object_that_does_not_exist_raises_no_such_key + assert_raises(NoSuchKey) do + S3Object.about('asdfasdfasdfas-this-does-not-exist', TEST_BUCKET) + end + end + + # Regression test for http://developer.amazonwebservices.com/connect/thread.jspa?messageID=49152&tstart=0#49152 + def test_finding_an_object_with_slashes_in_its_name_does_not_escape_the_slash + S3Object.store('rails/1', 'value does not matter', TEST_BUCKET) + S3Object.store('rails/1.html', 'value does not matter', TEST_BUCKET) + + object = nil + assert_nothing_raised do + object = S3Object.find('rails/1.html', TEST_BUCKET) + end + + assert_equal 'rails/1.html', object.key + ensure + %w(rails/1 rails/1.html).each {|key| S3Object.delete(key, TEST_BUCKET)} + end + + def test_finding_an_object_with_spaces_in_its_name + assert_nothing_raised do + S3Object.store('name with spaces', 'value does not matter', TEST_BUCKET) + end + + object = nil + assert_nothing_raised do + object = S3Object.find('name with spaces', TEST_BUCKET) + end + + assert object + assert_equal 'name with spaces', object.key + + # Confirm authenticated url is generated correctly despite space in file name + + response = fetch_object_at(object.url) + assert (200..299).include?(response.code.to_i) + + ensure + S3Object.delete('name with spaces', TEST_BUCKET) + end + + def test_copying_an_object_should_copy_over_its_acl_also + key = 'copied-objects-inherit-acl' + copy_key = key + '2' + S3Object.store(key, 'value does not matter', TEST_BUCKET) + original_object = S3Object.find(key, TEST_BUCKET) + original_object.acl.grants << ACL::Grant.grant(:public_read) + original_object.acl.grants << ACL::Grant.grant(:public_read_acp) + + S3Object.acl(key, TEST_BUCKET, original_object.acl) + + acl = S3Object.acl(key, TEST_BUCKET) + assert_equal 3, acl.grants.size + + S3Object.copy(key, copy_key, TEST_BUCKET) + copied_object = S3Object.find(copy_key, TEST_BUCKET) + assert_equal acl.grants, copied_object.acl.grants + ensure + S3Object.delete(key, TEST_BUCKET) + S3Object.delete(copy_key, TEST_BUCKET) 
+ end + + def test_handling_a_path_that_is_not_valid_utf8 + key = "318597/620065/GTL_75\24300_A600_A610.zip" + assert_nothing_raised do + S3Object.store(key, 'value does not matter', TEST_BUCKET) + end + + object = nil + assert_nothing_raised do + object = S3Object.find(key, TEST_BUCKET) + end + + assert object + + url = nil + assert_nothing_raised do + url = S3Object.url_for(key, TEST_BUCKET) + end + + assert url + + assert_equal object.value, fetch_object_at(url).body + ensure + assert_nothing_raised do + S3Object.delete(key, TEST_BUCKET) + end + end + + private + def fetch_object_at(url) + Net::HTTP.get_response(URI.parse(url)) + end + +end \ No newline at end of file diff --git a/test/remote/test_file.data b/test/remote/test_file.data new file mode 100644 index 0000000000000000000000000000000000000000..7b7df4f4ea8046d652a4c523c6ead775cab1d2bf GIT binary patch literal 60673 zcmdqHbyQr-wm!NVhv4pz;1CGz1h>ZB3BhUH1BBp&;10pvB|ziu7Th(#LeSt4^mX>$ z=brP%yYG$Py??&BMpb>QYLl~59(DkLgFSz7_~QX& zKw9A6JixGqW&VTJVfaPZzuU|34>tdkB^DV#fCcQFob2$ifAiCa=_CAu9bgy~_qPrR z#u+Z|@A3ox=)|M_Rd+ZHBgX$79}mOG@qgF#r_WLT!GEek{SS=xkA50V|9w0%@ExX{ z2g5+lKkeaQX9wl{dwk$>{_bOcm=63u_&ba<;y*YL*4IEj@?Y(Nhe6Ok_^-Yx_`4if zGywM>82&#n(tqGTdT21W3F{dCfAD{f1g{{MfFKtS@ZZJ#q43vG{F4thL;yC-ss8i+ zXS}Il!%Yo)M+ZQ#-cN)fF<6t~V0Z|IXJHurADjVe0Rjw@!s5+gJ%k9u?_l@?3?sqt zJq%yLF!De8XGpLf{|C1q!&b(><=emE|2u*e_eXMJZVX_YF$COJt$}c@KXI6UJea^j zD&!{+un4PFG6?__M=SS7okpMX0OO}?0Nl~c4mbe|hkyJ(LgosaUO6y^Mld%60LBdf z&j*k8LI#zH4UugpJ3@Q zW`B#*C@cgykvS1Nl{nG-D;5C4cm9q4cgR=$f0qwv0PO<4fD;!0_zn&l0Zx&yhu9ZZ z*y^gQ3{VP40iJ+8Kn!pIp@7GxE3IyYrdKI#ugxcv>-&?nleJA4{+}&C zp#W9@gFjOpSd{(=Vg>2`*#N^OADs)@-pB(0*EA!DX8%wM5_X#= zO1KVX*Qi=JaoV8$e<#!^WB_P&we1aTGNS;XC(t7b3Tz<%e*UxvHcWpuWSIOR3)>+7 z@?q`#C;X%FclzH*?5}tb**<$1dJ=m$;uz+l7#Vs+0}xJT{73Hz=44>=tDg)2L>^%C zk^A2z{gw4EPYn3;{3R@~*>#l)06O&mpn(BfPdorH(*|2*Rsf(3Tb9QPuzj`{0Q_LH zXS5DBiPQj~qX+;5`T*eLJOH@CW~N^r0K`?o(o6tg!3F?Yr2*jZ7fc6+D`0CeP8I-q z8e#FL01y@d+chI$ditr=h!D2h@D8yb zM)rRo2)^2pe;&*wAU}L39M`G9>863|e^9{xZ&d%B#&EDL4*=8!_+jYZr`aEhXaM{_ z_X*fW{a^PIO4NVfOK4&Ezq^;bgvI~sUcwE-|J}Vr7#0tL`}>?!gJHOTa4T#z{E`3p zT?2Spd|Cm50qke!nCNKOm>8J2IM{f^jHJYbgv1;)bQFw&Tq42(T>N}ua)!!cQaaLn z{Hpe9I&Vy^tgT)vIeR&ozcaM3G6Uh@;t~@Ozak}lWhTKdVfH_kr#=vDX@U?zf63o; z{*Z-7Ktw`DK}CCp4Zwlm;o%VA5fKq!y$|yJb6G*aLBxH|A&!Kn_7?etGd^cPTrLWY zL`^q=`qT+6m#Ir2DjFdXF$pOhJp&^XGdB+}AHRU0q?ELbteiYpLsLszM^{hZ%-q7# z%Gw6v>gMj@`OeEbC^#haW7wzg_=Loy6zKN`Gv)$<&Dj)?VTUHKlk=e&(1F{udZ)?--2M1;a~DMqyON81LFgSfB=tx z{D%(+&hrmh90bJY97wq0YRGS$@m_ESpx{fy<<@kg(r~Gt5SY45p%Kz@Z_u6oq4XD{ z|1&~?|0_oSMd-iyJaqv$D6s2E4m{{N0EYvD#{oSp1L*Lu1%U&P1Be2C9|LS?#fP^D zDt+9qEY8e+7k+rW43YlDYfp4spc+3UZAtua<(Mvf#e=)z&vABfaaF1y-tcBxlWya? z`M?^oa?)4GPlG<4=ZfYJIUWL)`8DaQN5?o>TahMtalf^YU---#S~89!&(}b{PKhA2 z#&dbA&Bp6A9cdQUHcAwXcqR<;;I@1oWgVi4>WlpO1TexEdEL&9_RE=nBzI8U_Rg52 zb9sCEMGJ+&hI%V8Ls9sQz$B@EuY#u_QBT%*H;8u7r9o=`i4uJqT3CfD^er*z5>$s_^|G-OLNb~FM@*%f zJ@Xpd-18PPK6_5i6D};xFv9 z-kIAp$zWt)3e7|noUvWYtn%-6idd(0PTj zoYbP^Apjg{qpdV%K50dlh)p&JtI+(D%Habp&e4A4^Lalvlz-7v<#X}fXcoS0 zbY;7=3{npvRa`uX;O3eVjk3{?mb!d-{1s0D^q^jnU|}GiT1;+3nv5xMqjrpNU*p*3uqa0 z9O$OJ1IfDB)T)V;u%J-8DCy}TNCz>Q%BphO3LKeQ%I3x)Ra)=*(~{iVPFl1_3Br8? 
zmsTpNfsND#S0!H8YyJB1c9=&vzZs9(D>v@UHG4<8BGC2+0$j}nMV;n_q|D~4 zI|eGP@9{TDvCqb5#ExdYAuk1SS*Vu^p{cra^4nYOaI}%TB?PZV*K2ueo^eK=kBLKF z(-5S&AsW&Z0!j0l9P;P>%I|*Z@x5<}5wn#MHzkq|XLlKFqF|PZb#UGh z448`D6%DasxuE-&i}0#J?%QOLQ34>wvZ`H4N{h1JN59cJ!}DC4s^@i)zq!uyh(092 zxLeZDD-lq5y?7| zG6jR4Uv^dMrTTwIf7{;9Y(21vV$>PKP0gJ9TXq`y?iP!;FSkc|Cxjfr01#ZX6&l>E=72WFX0bp8RR)pTs0aj$6YsF*?yM~AIA;k{`o zhG=$a6}94AJL7?`Vgl4!@(!{qrVKv-aueq?2XHU=Ff$Lg^K+ZO?AmpaXmWmoE$`S% zt5G&g{D27!RqkNtv-`pM9M-~-9?2j0EUnoiRehh`;7fi-*}yXqT0|nQ%ady>ZPl7= zyZC`!EFDc#GRhWd$|_m%9@1)A;G&YlNZo9kPnN)oSN_s zJ$LQukhHU~3uMNiX=+$-th0T+$xokkMsz#PZAL5xoL|FXQ6^=-++*zqsU-7LUR2Ou zFhnT`m^R%re+Adn)ls7C=}Ya#HtM{s&53H>y=P&wk{K7cD2>B4cUmd<81kax!QcI* zi)$?s@cBIU~4x|ANP zK#{_0T}rWA&6=pROAr4tWuaN@p|cKD782n!1@&rs`6-06*e3CN{+yTPY^#xI_BKxH ztu@|4@@z<0z@4jlLnw`Em;ZWbrl5oJ_Be#Om#+RSm-do!F6&(OOXU69L#fs^Nes`F zgfx1EW@f>~ixObn>b*zBOf%E!LQNzi`cn5rdCZ}!e!uP%E4uwEFZ=3gKzL`7sZ<>K zTXAG-5nFjo`alEeRwqRqO$SrzcqD))og2|^Kw4DTH5`blBH+l-@@&OzWxni}Ly2ha zFjW@P*tVn-6OzbEoDZ}}XJh-wuw$$Q5w|mMip?TWp`9x^w^rd}E1IjYjyUtjDlG=X zyE%>$%;&||3S^{vTBw&crO-qVe5uZhCTVOz7Zpb9#UUGb2#+|nYN5x({O$*?YA6;M zVisD!r_5<&>rNv6{=jUP_EV-v{Y5A>I%YkjyRCVJqF-aNusL`4h=vDY<~glZrKO%! z9a0}eUgRUs+m|~DKfHJ`7q=%_5@aplV(;rPk2GJs)jfW80*}dYpxo0Gc8EhBiENCa z7zVazVwjIzFSzQYKmK60-tdyn>ut9HO{Klkz6DQisUy9N^G(|4XzUvL7DdICX1E&U z(k=q*`1B~@V!C6F8_6D zG}eno144`HZ&YAcA$mRU0PQPM43VtkJ+Jo&RnBj_M=mRhx{Oq z$Gj;Gl^0e?<@4s?@xWfkrO8$}XN@6A@{A)2fMBsfAnS+7w+S}|c2#5&?i=q-cHOL- zE&>smE6z(Y#upif@IOdI^+QEto%k=R^m>zJYCI)UOub2;1*`}2T3RK&zBn$Xte0f9 zLBW=PelV#WkOAuYk(2kF{yxKGH@9TtJI8zYmdrjO4{uHNK+RoZ0vfu2X<8r%FD@n} z{{w+36DoykHe_68_$Sz2T_yFhsPeNh{OLuRhIIHqi>U*9K$5q z%!-031#zWZt~~j#=btCE&9sf>j`BDS73HXUFx+X9J`>@7J{Dfxq9Hy+hA3627~Wl1 zK#n8)x8JbyDx*caY8AfL2;f4}7^xv9xhRoI-q&zo>e>h>LTp9qb3is4s5tXJRp(HF z-&0g~`KB)>AFnK5QuI46)Oiqs#E|*Y$sbdUVgQ1&oBaVrNPJ&bKZPu5e6;lasK4eT z0sAZ-g_G~Ty_q!a(01pWUvp#;+b`ic;AhdCbj-)KC8A`bzDSJ?uDcHIC~DA7J7j*k zKn^gPk|o0*2ccww%E7w3^tI*9s*9mJv<&aD>IWSLO20I{XXy!s&nhN}D;&V(zv$<7 zr$D_t(tfd%a1=M2l`lM3yS9&p&CZ=k1yIWBiwW;i&PMgryiNo@oy%0nIwUUD)@0`F z)^j|I%CFaG_Rv`_fa{(2rx1+#4ETj;;9~dTOjNoZS~R~9&eXBF-+KatfOrIRWNPzM zrK0{kD$@5|pbV|RBY$}Ya%~<@HZshY-d6>e{g!&E%_dl4SokHc%@Ptc5&Vv6}@VQpVoWM)-laehF?Uyst#V7rmLHzWSw2)V-<58jVYPek;>0^Kbp(l z%SZ2}v3M z$MTnD0QwH+4-b0rFUz3s&u4PDzbv4CnR5UFA{;yt2pK?m>I3Al0hmDtv*-}ukwHj^ z@Nmd~3?KmEIU+6xo;cDAwYT^*&HN-8>jzwo2ColcR(dWNL{%>P*0?`HwS(CPqvF+l}S&4YVp@9^dg2c*;)hU z4IPVD2=4`Pd9$|1*k?Q{Sx zD`x)vSl%In{aL-lNC@nyrfxr5_XEjD8Re4umqv%>6m-wqi-Ow^=R@&yc?9VjCfwnK zm;sS5_73;!YrJzUZ$q5|4t~kqD^$`*OwaGvxzu3r`ea`e31(h)d@gKucgjJTm~Tth zo;931CUSSCEypOnl@>0vKa?w5$i>WKGj1{(^{;efmd}zedZe6jU}Kxa%}!z5FP~I$ z85lblBz~LA!d$J`IHNyv(UH!tyDU$h@u{Tr^s;tUMR?(GrSVkq5Ncpi_$5BRQ0(Ff zz^4j>1~*m+Jw5^EXz}ri*?W|;Ggl!F`U{mwv>{*Q?w^2P;Cg7xI$1;Y*BKd)?BAv> zCdbS}?Fv#=Q0U{2nNiOM=HJozr&NxRR@rIG6*YyFE5;Y=Vweu8>b{8L-!T*3V&Do! 
z52ic;7C8CCH$xV#C;O40KB`kCwN^d`sYC7t>9Jf#?K-v;HP*x?q3Ao)kFysa4QAOS z99cEt%kk!-ACoRyt0Rg~te*gEPs)0Wy?5VMGjrb0x$L&H%Czt?=_=EaW>bM)<|lQEvJ!RNQ_`531U0T~s^jm+6XKWUou zdTMos&J$2+!yxcI)@Oe3iZ0)1#g@gX8S>L!&MCPn-IeTP?o~DKX-NzLL}26F35(4g zmi8KCL#j(wG5ZNjBzVY_1 zR1l*J2`vHsyqa0fy_)2w5|c}?;V|-Ak7!pFCNt-hE}x>~l(T+7a6$s*rftJ~NW**~ zK5d8?LbpIUC0M4&)SO@JjxErdD9gYUJQ@rLrES~zQrI*;z~2iLGH*;yk4z7ZB)$Vw z*Xd>#{K^Q5ca{yJDx+!r)-(54xTA<|jDGaGVzz9^S> z1~YjmyhbnGzIcA!y zEd0WVW4wmCu>mx=UUhI*r1G9vWsu_(GpEJB8JA(XZKWT*#;1&XY+4(}RO_{c1sjff`2BB=Tw2z=zDAD`a9M ztp(FR1k~h#->f@uH*r6>NO{o3?qM3k0n#24~ zrTD^I*nww3k0~VoqtQX%0Me*^n-zzVvPR}@f-7N`Ax6H-{bPMCN^LULysU4u3N^CF?53nHAExjg0S5q-Ma}~n?_&W8Owu46eo!cus?v4@i{>s z<5Y98v}LNc<1{p@(*7#(oQaXx4Njp7&Y4yt)Yw)F+Tdx&0ez(30FPNEnHwHAI)a3*-Np=Nj%Lozd7T&B0EUy9=F zfPjb-`~;j&7jGUQ+}NKw+Qqp&+aRJ)B$Dde`c`jyA*2%8cw%Zl{YA&)Ald;=YhV6X z_}9lNas^6`ndLTjh*9r2kP`>R==$N;@9Cs~6qQYr=suM=6IwFG$J^o~*JKP}*`H-8 zQWbMQ{`fuJ$5f;=>_9H}0Tk4iT}+@g1o?Ud8Vph}w7BmKZ7$qV+yu{9( z6$)Jq6uV42e~W5iPan-#%qJ)E(eP)k4(ejvh*-c;xXjSXX}n6>h7^{a1~S|6vvj4J zKvPeY!j$6+2M0KbIcyWOt5Y_1H7iricvB@4M2KoGC-Z=#a_WsQ6B-~z@_p7J6RW+s zEvA^@e7=!LeJy>+#>cg(OZ9?Fe)|}=#;SxD$7QajR$ss4+bI@YQ5tn2*F@)m+9_d* z?|50b^Zc$Hyf85#i!N4)ZE}+&m_vhhEYpHFQgs#P{_MH$;LV_ zXz2~YrZ%Q&|lX%FjQyhKHDX2YickYvYEG_xNXxKnMW zBzUJq9|R#_0A75eOp}8)IYA{_P?& ziEz76z9k#YgYPCIY^E4g`W(Tn0CPP_`rvuPR6V=ti?EI+HzrhTnXS)NMi$$MGP0CO zvrr`F7Q4m{p@P9(y(FkXlUURM$1ej`HRvV#JG+ViJ?XmnPW!4Hh&j=ua`T|b;PUGOfq~@Hlw9j!_xs(L&z&66)>s&Ko{KWz{(JT?af!)4x0Q; zPDLAC*}o$){LZHnZIgFausIkNtS8#}ysoY~@^Ka7ZYN1osh&9)35Bmc3tFdVS$x1( zj;y~kH=?Emu-BN}MUQhlnTmD%-Mm7I+2uG=tW3b-uiCQ%UFUfCZ)2I- zB$xnulzGhulCK9z^)Oa&^UM>LgXdm;o|9k=|EL8H&IZ4Qz}cn8fE%Wu5%Z{&PDxcC z0$Vw%!=(!LmVP+si(2?i)2kBTEzVv2B48~1jvsQ0|8O9Sl=v`^$6M_=I)SGLjqUBD z6Sq!9gGb8EA~z22@vwC(@9BZ(qqK}cdjgPBSy?$pW|8%mtiGaV<}cR~R*!6`8eDU4 z4ji#o(W^RH7NVZIeVv?g!oxRC_E_WgCE&(pc#DSXYn?9L(_OX1qB!Lv(nkWdm%q25 z4!5_5G`85N&~{Nm>%^#tiuB#iSwFo_YOH%pDjn#MO;)A(9Gd+`|HLeG`&<4@vN8u# zhp!+-O3w=HOyR8e6RYqYEwyRX-t0pCnxHf{=kCz9-`G?ye_btl`lD)?MyZyw`S(86 zqK3JCt&@+Rsgj?Lt6np1{>d1zT$b;C?T~YF1n@NsUx+uyAU;7z6E$t|-5W&-eWRnz zK-B=&poe`M_1cl-KxidTE1`s;kb}U)M`|9;Z}S_*x{7c_~2kIR)_yF<{Jx(P#OV)dB7(S}jr^A2?QhG#r-=L<|(* z;#e?a%~t)ElkREuW-9QvENBsFzQ*%&J=*4xy~%qeUddIMi4Je?R+Vt0qlqa??xVv! 
zu{P|zYf(7Myw;8OUKV-kMcxc%Ci5Z6S!w+cxr^wSMj^Dgidqo!$0{8115(Kz+EE-7 z^3^a$$l(Me7FCUP#u#zHLiMTHiTUUIxgI%r^4rP7hQyY@t@4rfvLm(B)@C$k*EwU* zb#B?u@hO(;3jJoN4etUAngzW=fJjGZei`$`LuNZG-L}xI1Bf~-#0M>DCm%vM5_1EX z-=$Hh+CXnUTu=iN=@Yw}gZH1tW|guJO=^=3H5^zWS}mR!%_Nn|QBI?2b!&9ExCY{J z*q)?=k3HktEmb>VPr!Y-Uh#5+u(aEWn!+b_DfkeprZTtTQM>au_mW&23vLfW*LRI$ z{FJVF_Wt7tL86fReUg*8{z17{TD1sWUgPP*x3Rpb)vixKKO=*}G#@=mi<^%V%Xu0u zT37VN&B`6&wamgm7HOSN|8kz)y<`zC<;K~TjM;+2xA}UiS}e>}JqT%9cH0c$;#-#F zYBIM}XUdNRp{FnOU9W;ZtSs5BWBUnSs65mCGQ*dRG(yTy5U5TKyQx#1Fg8{{0e-8D z4;mE?53|=Q4u?{wL)Aj3gb^nnn5G=y4@3BGwa)Qzo&a1EEBeOUvD($?#MqDIsvrY( zuNPlL>$P&~H`3>4@q-~4`A$#3HBhDZDn+t;LA+|Ya?JUHHxAT`B+I39Vb~(y?j0=K4v&?HQi8Z{drTf0#V8+ zIPt8BYWa(@{_CQiaBb~;hS@Z$ihamjMw^O`Z!K=P*HJuh*7f0gEbE4MqojMJ!Ic^iwjpQ+KG09KpN zQ0HQzQ*>6ESWR~3Xi@II&IoB0<3c_M?&ZSXMe>Do+XYM;%)D*GLv(IK7cInYVXQ}w zY8m%r&8=`&>Z~6u!}op;<&DIRq=AQ0a?a0XCuXn>SyJp;FmP)-<8c;ERFkavryR zN{qrmDtd9^JM@`!EP^lPJ1a{NdqM2!k@&5#PjHHNpkKx^y7&9MF0aQ-FdF(>!EXx; z8d2*jFQGHLAM{NUTcLqiO~=eNqut0{K>P zl^g9>7#kSX*h8p^Qq@?-J$8Ap`gv3QzO!gni1saqX8Ky=IpowIQL%D9uY9=NICjLe^*nxJ12%by}b~lX%974mx^h^;I z0`6`{Kf-0z;w3ABKf_OcFlx7E85nPNFESRegGQL@oHR18G>c^o(C3bOy-Lue*-cuR z7U2@p@TBP;hgOxlIzi+5!~;djvF_9%(+2}-E)H217<5vxo8hjVI5E1F4gLY+~=oSFKjKOa|XmV*H1FW?Ygf&C!T#nDxXB;YHRr8t^Lg8-UMSs-m%d0~h zqI!p#oNeO2Ywz7Hyox}1l%n;?1}H0(8XHqWM;lOe6`yDP*|yGUg} zDtS#MI7N^1UP`i(iTkNH+(dtQRb-MaIgG@m2wiWFt|ER1rI1uD_D&sae`8Z&(Cpyf zd-q1&vtbD&^E$8GD1x-1XsgO7II$29I;0-CSsp_JG5nlg8AJa3%a@fBw^+I8ndLxV zutN1Eb*k)j5pN{@Tw$(2!Z4Qt4}8l}d(AUVs`>UeRFC?CTa(Sxlx+ZcAr~c7A-HrF zj)CNXx!lLM(N}_A9>2RM&Lw~%uf^`Gnn$VgCC-h0_;9>N2?1>`9TNf(|J|K-uLiY& zVd}7osCWNr1BLrk(+dqm@OSl{SzQ)4jS}Qbq3TZ`_6qq6b`(*z+ObOY>Kq+Vv=%h8 zIZ)9MrsUh**7%Ro8ybczI+Mwi$ti<%|?~3-^Fx(p)8dXyY;L#hT3Di zxBVoCk>me*m|$VE=f{o_PK>F~pz2z1i9$H>Rf2?ZTSSCEG(N`yBPJ*OV4Nk7kHM{r zGVo)g#b|jj=SU!mT{pug;RVg@yo}Uea$25ELLNyBHuBNFhzN(+pYp0adDA;u0$fYJ z=(Je9hO+FBet7#jiF+j~hA1yne5lUtj_Q~0$_`6Oa5E?H>7r7(;M@;wAKXm+p+0r? 
z^Aj-kOzZ?9yPi!}tw>l1#Tl&B#Tpinq4eINN;0To24W-oyat(IUFlOr$CND8U-K1Q~T&wCgF!he!Q%RXXZ7S>`T9;s&=RuZR#w(v2evJ#|5iYpj3u zvs6bt4mVB;QPL@&p}8Wt0wYY+TCzNced~SRcll$19BAuk^{TeNkqBrI`4p2q++s8L z35YkQhBFCC*o%`tq?Bwy!O(|BRc{}w(W_lvRYgLkP(>C;R#e+~8^dmjYhm=TuT;z2 zqQFP#<-QBg)b>Hl*F0CL=`OAQml;X5o9yR7&=dKKvP#_<11~g>e%vI)A$N`21kLY< z%5ov=wdc>^H%W!GexRX>u1Cz(&nK0%6cagAo|xq&3?WZ9`&kt<`ZyCRsC;F>#ClVx z=Mu%K&-z}%lRMsm`h;d-IX&VInO4KEs_x_pqE%=6TM8gye`__(NS0}o{tXLK(O68V zgL;l1cfVGU=B+<7p4QWHJmhR0oicelx$ttOTy_rLld%^UZ-zQmwJ5OEK7!%&C)X9_ zUE%j0`{~3`0=D?~lQjl}uhJd+uF(Y-C5Fl5AFbawj@NvvLCjStxD0(v&uK)G%|5G4 zE;pZ9eyEG~uZ%7!2$3Q|io7G>n(#YC+bI|H$a=+n=hM)#5AYYh;22_h(e|o%!_$Mn z<~FHm6?M<35O!LxW}h2B+`T_Jx~j=HxPQj>TCatLiM$)fm^mA?pF16Adgvri9rueM zpMf$mDfuRAw72Z^G3T6lPVY^hqyG#~J1VThlc7~5|QEB|LpKIZz0sO8n=%5Rw&(LV9q;URPAha&yn`Dd&dgeM>7 z5>5xgcEesC*&W?#AL+NP^<=sN;;Dcyi=JKPvVlA?U92eh)NlDiuFtzl5!R2X_B3DN zvvjEMVBaj3ixFzV4pIF`DZO@1=$LzR&p+dan zj0Va=Z_Vw+_K&I0jj-^Kct0_*mZKTXyroAG{_4Gu_&Aj%DGvIP=~e8}uq)pakMMO9 z_BFNYvvqwltG_&L>KH@aN#~Z0tzzDbiZRd1=*lH4Ce{et9vv< znUUFPxT^=UkY;f_${)-m>nzu5c}t0C9bWsxC&<5aQET>FdFaX8kNj!6(N2_qH`ZF% zYT1KZP%2h%toxY;|C@@}+AA4JqLiwZY2jn01=NZ43CbySSL^_rfDAshe4kW2$t{NLm8sRVFgAL*rg%6CWJ49RHndO{A#dlu!hK2!yc;`_TBGy#q~$Mx1sYrTHZZW)**EB2^^OXwkKu(rh6$ezeuP=*vlC zp81R#My>V4y+*Jh&P)o$MWl&}R|VoxVOD;%;p_e-MSrcAiQDdU8pz()Zd8gN6qG17#^+6AXMsEz?KgM~Y(monxpux}%Z=~rk>!oNxnZha1u4*Pl!8F*$PBbX!~x3B;dT(Vj||{t;mCDWjphoWY;9$G6uN8snF=>yWtb z!bQ-o>&2?GY$SeVki*1<#rP>)tlTrhxt2t4;v!9NlY&k#YOYaj4yuas0+S_s^yBuR zVitHyCS7)+)5qvb%98F+!7s9xu5luzqES5QQ7@bAO=rMwPoRQSigaSPzFacaI^b!t zFUzBB{Qh6|n7f~VGOvsKvOzD*m+Pe6vTBtbbv}?Rivr zs4J6c5?Zgt*FKn-kJxd-RFv4O{j2$Icwibo{#gR3HlK$3X!Jmck6fZ6?IxNJ864>W zS6AvBWiy&=xR;ENwA~~()YU~kw}HRGK=FOX{IFw%U4c!{}&nW#>8c?uw~=`jW3koMMqqzROW1dA{3Yq>T>l zV2(1V4dF$SQ-LdOac9M6QYS|~!nXZBbF>=h;F*KDhbOjX&~jBfvoaz60)=*%N2`cv zidFog-sGT6UVGBtIcC0=Pe*&rpuXTMYHDH(WcEP*z~-7s^^8djhbPcwWM{M(uQ1o* z%!8J1j6^8v+2H}-uy4b3O1R;EbOv9NigkH^(q?sbhc7Ers8;6J-Sf-)z4;{A=axR! 
z2anxde^h(s0V{;-t$L%1gRg7)_q-XC7>9@CB98!mPyxd;JcLmO4(s zm*i^HIFE^45S{Rox)jt#+zs^^=J*_CbS+ABivtzR^z^a)ruRhXVI0e4a|y1tmNNz@ zCVV0A-=5Qv#zMS4~XV&Z&3G$ zFjqL2m6p+o$kF^&{N7ay4V_W99`|MB%bka=)Umw`>|ocRR#}g-Tq6=MvX>0^wQBLx zXVI9^6HGgV)<0K9#6ca}oo`5!E}1zblW1V-Lt%zbYhY}P$?0G3GLYMQU>57{F6;g1 z;L3B}q~t+?T=ZeWI09LmUm;$soF`NC&Aelr^Ih|-UNOdHm?LE~cjuMytzZhOyKyj< zhH<2L)9YS1+o3aD4J!}6QyIEWqA(Ek>th;yOERof$o~gXaTtQ9T|t z{m3HqZJ^3^fzrDa>9@2(r7K1JL88k6&8H2YveK(cq6O_qjcpHkE^Ml^U5=OT+w)EJ)Sq5#!>b{fD3N>&mv2d-FO^fb zjrhUF#H`*yzmBw$bv;I&fZ}(b2&=I1<52^+b5q=4f9wc~m_uxmx9dF`bP>Y)rBKCV zt3dc{Z1q{gHyb&53x=i_E=)P;-h|3=OVSa{9t7U zgSsn354lP@2`U+N^>2fXcJx{jEPu zq*syNXg{Ykr)p-L6ZS6%%y&XEH*Vz`8jfxCaEZLjZS8f@w$Jn5Jzw#;p4}O|Yl-w1 zVX-RI<0u&Q2CG+6)F7F_*%+kHT^1LRr=Cv`d|1 zbJWh+@Jntz@>F5Jmr8>tN~~T8;hUc;*!^Y3K9&=z1h3% z_OSBqi5z^KAuwnm3Ysn!JbK-ep;>XU>L7m&e?Q~HvIbs^E@%r_%$O-x(q8*vx*^^u z^btYZ+4;gj<#58r+cEG^Thz0r@S_Tg6Cz{unU?;j=MlW$@iE62KZ#(~=*}YHY~~6h zu2}H9ipKEONu45Nu404Qs~iIn(GUkYQON3wZ}^pZM6!dZZKaU)xpJ;H-7`kbPk7;e zTPX_qX0mqihv;{tCxj~5@OPpWE-L48Bx zOpbx>>GphmL@`1oi_6ay5fERSrH()rS`SzehKYwX)6Qt>}z3P69FB;lIxQ%5Zq@CTPKXE5ysa9-F#79(R zaZp}})aZly-A7YBOV-}HA*a(+L~wJVXPs|4voosX-N$hAQhGG=JNfF#VRb_}bvdRN z;?-XlZhIRaspnrQ6~+_plu8ac?}0(8qkgmIAL@S^Q+WGeAr%21 zd#yVj>jlGdZiVjZv*gg?(siTw_rv#0cc4e8bbC@O3LA%+T>@1fQ_=@F@^l}|0pp$pU1^fNRAymNUgn`R@V#R--7`o$;8cv8a}vMmE<0{RDVK=IDfcw6Z;rjIeZ5Gq z&(<-cOu#?2w1|jiRAR{ekhUmEM<;=r<^XAd`?T#{;wfKyll1zTPc59^{lcjZ?QxpQAX}IaeCOJ9{P0&=n7paoKj;?Wn$CarXY!Vb zc9}$GkpofV%J_!7J}D>E!4ynySh97z?+Y%8Tcv8Q+$V1EeB-h)zs!0;GaffAx+nd( zGi|8rq;HTEjY9CU3I8CnB(Y|=j>t7{&(p@KAUjOg|GE*LyHV_9ud%t!Mlp9D`-Vco zi=Ec{x>~kr`sJv7E)&x^R*zSqK4ENc;O2JAKDa7#85uwvyea7lg z>ub)sbUGo@cK($X_2{ALzx0jXwl_ASp2r5kuh-q$v1m0rx7h<$(Mq11hNcOE+VPUo z7~}qCpSba4-Fp+l=)*M<+!X!TAn6)(bL8WY0D=SHVgZ^Nvi^EDp_j-H*<&leD5*uk zM&l!pMW1Gi$-g-{;-wM(W}7(%#b(T~>aZh`sU}P{){0!S?UVDsl7QeM#a2qpx7R%M zaK}>!jLhoUis(3oR?4cgEuQlF+ItkNl}983eVb_?4tRBr+TCVy3!C)qH*@l+1G*~h z2U2@3+eN134Oks@G?KiDkACZ9b?CjQ*zHh9X|XLXZW?ide+%4t6t?tG`X(QdapH@G z+xsP=>?ns(fekk^9PZo(`br@@hjX+M`E(UQjxV`A5i&C#-Jwo=#e3%bMYD)4VR8a$ zF?!#%EeD3jln!Ek1Scinir939={D9+j<9;*`eTlyK5hBXOEe&^{c}%*5 zm`7h5Tf0J~82w~R4H7d0L9w=^iZel31fLldjHZ>8=tSrySOZD=RgH?gDIPoZAW!}j zoUD-zP!UHj3f3Lj(L{n`ZH8K+x)pBYNWzPS9((R%;&}up8DT|fEo(0u z0&x%xZ|63zmPp(!!)ylCPudIfua2K%GMGbi;-x$@v>{u)ihGs8%aD{b@A;+GS$EI? 
zCyRk|8J|@luOWC6R}{~jI9p|;#?w1@mg?N2FLkr+&^cM}LvW%OH^cOyMq$IQn7fg& zp;HijjLj<{NNuuWzz?0XaE?}Y5md#C0%YUEMbY)F+GIOpXrq_3oO%QAirLNQ-Sz71 zVO<+(-DZYCx$_f_gwJL3p711jRjC@!Sj*$Y^KorEqM=86xhKh0j|ndI1`Qv<@gQ(h zrQ{>`d`wb9>y-el`k4eZ_rntoufYMQTs_bFBmM5noM z_u`d;XwpMsyE_et2v_-~0{04cd7=-!`sEa8{X|$TH%<+~zl4$qG>&Wt_nvP-0xju= zt9XbzM)_pa(3^CcFPm~y=w!-0`kLydt;H4L1rBnVD(Z>lv3qBA4NB znRSD^w9<(VK@l{YNO8bjHioPU7j)5nKjXhY;3;_LcNJCftrap}Y(1KP`kYyU+A~tg zN9FJd5br#3{3u;iUb0*|y`Moxiz#;O#b4I*y)q$PMK;sM&er3a4_V`+y=Rix(>8FX zFtY%?NFYrGQoPzMsh=D;gOLOR4H~$fr(Nv|8x*lOV*e$k z=8wLVGgq0RoiYBZkwwxxU%yhXZ$m_dSsY0qs7~z@N3{+GKg8-A)>%hH3>sk2i4~)9 zDxQD(!F=&JcesjUW9I6z3t}xOze6K_oO&7f6Y2Y)j~rlHM{Ca~MloALo|5(~D@0(%GRQq`9si zd!F2wDYJ~sUdnJJU;m17PrKAMC*P@_8BIMNg@9EQc1vhO_DKFjBFZswS!tP)v!|~L zuUyk;67vHrvvvKWw(Jn!^a)tD_xR0ltrA+LxkRk7TZFYspwUPv;CA#dQv{_U%R*}B zDsHBC{-t$6FOjpA#_R7T;TA(|BuM44B5PhEk12oOMc_LT#Ix{nt=-RX#?C+joUGf( zU`>9jQ}R4@@c#mJK#IR3)`HY*Q*mc-!bpdkTVx*cBQ&ii(itC~JS^{1P}Y`p)BF;fzYp8$H<#DQ_=OVPt8VQ`9gKRIpS@#re+O$?ACE25sMuQE zy8DvLPq{5P1m_=44Ss|1V`x>qkO3h<$r0NXERDr}!yeBpzwnNT#i>6Eauc<_S zv_I_kO|nkAc!Q)`-wuTBaVhfTXEmtSEfAYm3vH0$oP+O4JTrN9qhH?KId8bFfZK3D z%*>jS$kJ?Q9q7AX(>y(8)LGHov%E4}OAi*(bKC>PW3B9dHuy1Sz*Fl!X9`M#L)7z5 znzz5#_2FudCh@)7hrbJAe^cly*0fCy%R{ji`f89j8F5O2Tt23uPIBcpucEw^6?-%O z8q@S`BGaVJ>I14jHPs!Z$o~Ky)u3$Yt`q+9(t#O0TAi({j@qNI@au5o!b$%CHAUT( zvrQGHZLTkJ>v5y*3wmd2$<1SIQd%nfi}P_ED$l?oUZggX%H; zjbJX_wsT!!Oxx{1O0W(lOd2by>YUokOZq1??&Z+BHP+T_rsdT+a9V93Z9Ph+?)#v2 z+21*Eze3oL&a;RwmfbGM*kFTIv|gRZ1JF{X_GCDH>$}HV_>PhWa|0Da;Z2wh0zHl> zH!n+K8vqg9(8l3znHl#q8=;hJHRW|qOsw~+!}kdUDN7=QCF4IZpJ7%vc%U}8CbuED z7cEIrGN?ZFPUP_*kyA^psGMfI4Xc@@a?# zSxKPrF54MMpS?xb1bLhNYP8)5%2tz9xo{MHe$;;A^y;a{B=+sS!2K5<25-3alS?P}fi zKKr0~6sZxDNbIfLOYf`tqqS@m{h zOm)Jbl3F7^rB$N+n$4z*dacM289z!dZ3@|_(%DJ2Vcny8l%>zqlR)3wvNffxDFM|o zHy-CWrWRu1rtA_F*+6SJ_N7JSTxyq#DgkIIL`^bE?42WyI1_f*aXW$%NFMZ$QBsAn z!c;nr10;K&^c8K+q1D!1X@DIFYbJ47ja~bLbmrm)-~)S46HLZi47Rf`=~s6QyLcUB zlha^E>~K$|GVvyxbl#5BuBDU)_KztfpHNBWp7?s*Ha-(=rrTegC1m=^s{C^DMvmGN z51GL-BZ`;egKBlL6ccuN(}@aqdZd_vRJshnA9}gZF_G(C4Rs19zD+vWN252a4cBLE z%0Ll3)o`wvJvhdA&360dYVJ0Z3o{4OrPXajwxOQN_o-$6R6IJe45TRlE9`&oTD=O? 
[remainder of the base85-encoded GIT binary patch omitted; this is the binary test fixture (test_file.data) used by the remote tests below]

z$UQVj#ZucliBgUdAW@0KAgh5;o6}_?wvj22RWMR8YBu*xf>P;0SQwmCvZl!L5j~F; zSPfdDdPPK;=3=~t_h+L)4J9)sB>Ecoz4JDLCyA>5D2GFh0UxD7u&b+ul(Mo<{ISIY zQB~4jbP@*OJuB-5e*jQ$kUsH5FMBC1$qs+ZBbvFZS9(Z%DpBg6oqBoITTJf8Fe-BlngLWb|dTcA+EDpP_m06nX!Q9P+& zM?BC$L#RqZ&^w8mvfk`C@og$9Tz3>~)L-T;pk#t@s$r+Ll`N&=0iI`?5o2mR(hzc( zs>71JAI`8C!EFYdTWTRlj6vdtAhRHd`&2`uFL9nK^}5^Ept?zXU=GnurU#?{+5W6% AV*mgE literal 0 HcmV?d00001 diff --git a/test/remote/test_helper.rb b/test/remote/test_helper.rb new file mode 100644 index 0000000..d93846b --- /dev/null +++ b/test/remote/test_helper.rb @@ -0,0 +1,30 @@ +require 'test/unit' +require 'uri' +$:.unshift File.dirname(__FILE__) + '/../../lib' +require 'aws/s3' +require_library_or_gem 'breakpoint' + +TEST_BUCKET = 'aws-s3-tests' +TEST_FILE = File.dirname(__FILE__) + '/test_file.data' + +class Test::Unit::TestCase + include AWS::S3 + def establish_real_connection + Base.establish_connection!( + :access_key_id => ENV['AMAZON_ACCESS_KEY_ID'], + :secret_access_key => ENV['AMAZON_SECRET_ACCESS_KEY'] + ) + end + + def disconnect! + Base.disconnect + end + + class TestBucket < Bucket + set_current_bucket_to TEST_BUCKET + end + + class TestS3Object < S3Object + set_current_bucket_to TEST_BUCKET + end +end \ No newline at end of file diff --git a/test/response_test.rb b/test/response_test.rb new file mode 100644 index 0000000..499a89f --- /dev/null +++ b/test/response_test.rb @@ -0,0 +1,70 @@ +require File.dirname(__FILE__) + '/test_helper' +class BaseResponseTest < Test::Unit::TestCase + def setup + @headers = {'content-type' => 'text/plain', 'date' => Time.now} + @response = FakeResponse.new() + @base_response = Base::Response.new(@response) + end + + def test_status_predicates + response = Proc.new {|code| Base::Response.new(FakeResponse.new(:code => code))} + assert response[200].success? + assert response[300].redirect? + assert response[400].client_error? + assert response[500].server_error? + end + + def test_headers_passed_along_from_original_response + assert_equal @response.headers, @base_response.headers + assert_equal @response['date'], @base_response['date'] + original_headers, new_headers = {}, {} + @response.headers.each {|k,v| original_headers[k] = v} + @base_response.each {|k,v| new_headers[k] = v} + assert_equal original_headers, new_headers + end +end + +class ErrorResponseTest < Test::Unit::TestCase + def test_error_responses_are_always_in_error + assert Error::Response.new(FakeResponse.new).error? + assert Error::Response.new(FakeResponse.new(:code => 200)).error? + assert Error::Response.new(FakeResponse.new(:headers => {'content-type' => 'text/plain'})).error? 
+  end
+end
+
+class S3ObjectResponseTest < Test::Unit::TestCase
+  def test_etag_extracted
+    S3Object.in_test_mode do
+      S3Object.request_returns :headers => {"etag"=>"\"acbd18db4cc2f85cedef654fccc4a4d8\""}
+      object_response = S3Object.create('name_does_not_matter', 'data does not matter', 'bucket does not matter')
+      assert_equal "acbd18db4cc2f85cedef654fccc4a4d8", object_response.etag
+    end
+  end
+end
+
+class ResponseClassFinderTest < Test::Unit::TestCase
+  class CampfireBucket < Bucket
+  end
+
+  class BabyBase < Base
+  end
+
+  def test_on_base
+    assert_equal Base::Response, FindResponseClass.for(Base)
+    assert_equal Base::Response, FindResponseClass.for(AWS::S3::Base)
+
+  end
+
+  def test_on_subclass_with_corresponding_response_class
+    assert_equal Bucket::Response, FindResponseClass.for(Bucket)
+    assert_equal Bucket::Response, FindResponseClass.for(AWS::S3::Bucket)
+  end
+
+  def test_on_subclass_with_intermediary_parent_that_has_corresponding_response_class
+    assert_equal Bucket::Response, FindResponseClass.for(CampfireBucket)
+  end
+
+  def test_on_subclass_with_no_corresponding_response_class_and_no_intermediary_parent
+    assert_equal Base::Response, FindResponseClass.for(BabyBase)
+  end
+end
\ No newline at end of file
diff --git a/test/service_test.rb b/test/service_test.rb
new file mode 100644
index 0000000..7ab7df5
--- /dev/null
+++ b/test/service_test.rb
@@ -0,0 +1,26 @@
+require File.dirname(__FILE__) + '/test_helper'
+
+class ServiceTest < Test::Unit::TestCase
+  def test_bucket_list_with_empty_bucket_list
+    Service.request_always_returns :body => Fixtures::Buckets.empty_bucket_list, :code => 200 do
+      list = Service.buckets(:reload)
+      assert_equal [], list
+    end
+  end
+
+  def test_bucket_list_with_bucket_list_containing_one_bucket
+    Service.request_always_returns :body => Fixtures::Buckets.bucket_list_with_one_bucket, :code => 200 do
+      list = Service.buckets(:reload)
+      assert_equal 1, list.size
+      assert_equal 'marcel_molina', list.first.name
+    end
+  end
+
+  def test_bucket_list_with_bucket_list_containing_more_than_one_bucket
+    Service.request_always_returns :body => Fixtures::Buckets.bucket_list_with_more_than_one_bucket, :code => 200 do
+      list = Service.buckets(:reload)
+      assert_equal 2, list.size
+      assert_equal %w(marcel_molina marcel_molina_jr), list.map {|bucket| bucket.name}.sort
+    end
+  end
+end
\ No newline at end of file
diff --git a/test/test_helper.rb b/test/test_helper.rb
new file mode 100644
index 0000000..d875b91
--- /dev/null
+++ b/test/test_helper.rb
@@ -0,0 +1,86 @@
+require 'test/unit'
+$:.unshift File.dirname(__FILE__) + '/../lib'
+require 'aws/s3'
+require File.dirname(__FILE__) + '/mocks/base'
+require File.dirname(__FILE__) + '/fixtures'
+require_library_or_gem 'breakpoint'
+
+# Data copied from http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAuthentication.html
+module AmazonDocExampleData
+  module Example1
+    module_function
+
+    def request
+      request = Net::HTTP::Put.new('/quotes/nelson')
+      request['Content-Md5'] = 'c8fdb181845a4ca6b8fec737b3581d76'
+      request['Content-Type'] = 'text/html'
+      request['Date'] = 'Thu, 17 Nov 2005 18:49:58 GMT'
+      request['X-Amz-Meta-Author'] = 'foo@bar.com'
+      request['X-Amz-Magic'] = 'abracadabra'
+      request
+    end
+
+    def canonical_string
+      "PUT\nc8fdb181845a4ca6b8fec737b3581d76\ntext/html\nThu, 17 Nov 2005 18:49:58 GMT\nx-amz-magic:abracadabra\nx-amz-meta-author:foo@bar.com\n/quotes/nelson"
+    end
+
+    def access_key_id
+      '44CF9590006BF252F707'
+    end
+
+    def secret_access_key
+      'OtxrzxIsfpFjA7SwPzILwy8Bw21TLhquhboDYROV'
+    end
+
+    def signature
+      'jZNOcbfWmD/A/f3hSvVzXZjM2HU='
+    end
+
+    def authorization_header
+      'AWS 44CF9590006BF252F707:jZNOcbfWmD/A/f3hSvVzXZjM2HU='
+    end
+  end
+
+  module Example3
+    module_function
+
+    def request
+      request = Net::HTTP::Get.new('/quotes/nelson')
+      request['Date'] = date
+      request
+    end
+
+    def date
+      'Thu Mar 9 01:24:20 CST 2006'
+    end
+
+    def access_key_id
+      Example1.access_key_id
+    end
+
+    def secret_access_key
+      Example1.secret_access_key
+    end
+
+    def expires
+      1141889120
+    end
+
+    def query_string
+      'AWSAccessKeyId=44CF9590006BF252F707&Expires=1141889120&Signature=vjbyPxybdZaNmGa%2ByT272YEAiv4%3D'
+    end
+
+    def canonical_string
+      "GET\n\n\n1141889120\n/quotes/nelson"
+    end
+
+  end
+end
+
+class Test::Unit::TestCase
+  include AWS::S3
+
+  def sample_proxy_settings
+    {:host => 'http://google.com', :port => 8080, :user => 'marcel', :password => 'secret'}
+  end
+end
\ No newline at end of file
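For reference, the Example1 fixture values added above can be cross-checked outside the test suite. The following is a minimal standalone sketch, not code from the aws-s3 library itself; it assumes the standard S3 REST signing scheme (the Base64-encoded HMAC-SHA1 of the canonical string under the secret access key), which is what the signature and authorization_header methods encode:

    require 'openssl'
    require 'base64'

    # Values taken from the Example1 fixtures in test/test_helper.rb
    secret_access_key = 'OtxrzxIsfpFjA7SwPzILwy8Bw21TLhquhboDYROV'
    canonical_string  = "PUT\nc8fdb181845a4ca6b8fec737b3581d76\ntext/html\n" \
                        "Thu, 17 Nov 2005 18:49:58 GMT\n" \
                        "x-amz-magic:abracadabra\n" \
                        "x-amz-meta-author:foo@bar.com\n" \
                        "/quotes/nelson"

    # HMAC-SHA1 over the canonical string, then Base64 (trailing newline stripped)
    hmac      = OpenSSL::HMAC.digest(OpenSSL::Digest.new('SHA1'), secret_access_key, canonical_string)
    signature = Base64.encode64(hmac).strip

    puts signature                                 # expected: jZNOcbfWmD/A/f3hSvVzXZjM2HU=
    puts "AWS 44CF9590006BF252F707:#{signature}"   # expected: the authorization_header value

Example3 works the same way for query-string authentication: its canonical_string is signed with the same secret and the result is CGI-escaped into the Signature parameter of query_string.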