Replace aws-s3 storage backend with the AWS SDK for Ruby. #579

@@ -3,7 +3,7 @@ source "http://rubygems.org"
gem "activerecord", :require => "active_record"
gem "appraisal"
gem "aruba"
-gem "aws-s3", :require => "aws/s3"
+gem "aws-sdk", "~>1.0"
gem "bundler"
gem "cocaine", "~>0.2"
gem "fog"
@@ -22,6 +22,7 @@ Feature: Rails integration
And I should see an image with a path of "/system/attachments/1/original/5k.png"
And the file at "/system/attachments/1/original/5k.png" should be the same as "test/fixtures/5k.png"
+ @s3
Scenario: S3 Integration test
Given I add this snippet to the User model:
"""
@@ -10,7 +10,7 @@
gem "sqlite3"
gem "capybara"
gem "gherkin"
- gem "aws-s3"
+ gem "aws-sdk"
"""
And I configure the application to use "paperclip" from this project
And I reset Bundler environment variable
@@ -1,6 +1,6 @@
When /^I attach the file "([^"]*)" to "([^"]*)" on S3$/ do |file_path, field|
definition = User.attachment_definitions[field.downcase.to_sym]
- path = "http://s3.amazonaws.com/paperclip#{definition[:path]}"
+ path = "https://paperclip.s3.amazonaws.com#{definition[:path]}"
path.gsub!(':filename', File.basename(file_path))
path.gsub!(/:([^\/\.]+)/) do |match|
"([^\/\.]+)"
@@ -1,25 +0,0 @@
-# This file was generated by Appraisal
-
-source "http://rubygems.org"
-
-gem "activerecord", :require=>"active_record"
-gem "appraisal"
-gem "aruba"
-gem "aws-s3", :require=>"aws/s3"
-gem "bundler"
-gem "cocaine", "~>0.2"
-gem "fog"
-gem "jruby-openssl", :platform=>:jruby
-gem "mime-types"
-gem "mocha"
-gem "rake"
-gem "rdoc", :require=>false
-gem "capybara"
-gem "cucumber", "~> 1.0.0"
-gem "shoulda"
-gem "sqlite3", "~>1.3.4"
-gem "fakeweb", :require=>false
-gem "pry"
-gem "rails", "~> 2.3.14"
-gem "paperclip", :path=>"../"
-
@@ -1,25 +0,0 @@
-# This file was generated by Appraisal
-
-source "http://rubygems.org"
-
-gem "activerecord", :require=>"active_record"
-gem "appraisal"
-gem "aruba"
-gem "aws-s3", :require=>"aws/s3"
-gem "bundler"
-gem "cocaine", "~>0.2"
-gem "fog"
-gem "jruby-openssl", :platform=>:jruby
-gem "mime-types"
-gem "mocha"
-gem "rake"
-gem "rdoc", :require=>false
-gem "capybara"
-gem "cucumber", "~> 1.0.0"
-gem "shoulda"
-gem "sqlite3", "~>1.3.4"
-gem "fakeweb", :require=>false
-gem "pry"
-gem "rails", "~> 3.0.10"
-gem "paperclip", :path=>"../"
-
@@ -1,25 +0,0 @@
-# This file was generated by Appraisal
-
-source "http://rubygems.org"
-
-gem "activerecord", :require=>"active_record"
-gem "appraisal"
-gem "aruba"
-gem "aws-s3", :require=>"aws/s3"
-gem "bundler"
-gem "cocaine", "~>0.2"
-gem "fog"
-gem "jruby-openssl", :platform=>:jruby
-gem "mime-types"
-gem "mocha"
-gem "rake"
-gem "rdoc", :require=>false
-gem "capybara"
-gem "cucumber", "~> 1.0.0"
-gem "shoulda"
-gem "sqlite3", "~>1.3.4"
-gem "fakeweb", :require=>false
-gem "pry"
-gem "rails", "~> 3.1.0"
-gem "paperclip", :path=>"../"
-
@@ -43,7 +43,9 @@ def initialize(attachment, hash)
@s3_permissions = hash[:s3_permissions]
@s3_protocol = hash[:s3_protocol]
@s3_headers = hash[:s3_headers]
+ @s3_metadata = hash[:s3_metadata]
@s3_host_alias = hash[:s3_host_alias]
+ @s3_storage_class = hash[:s3_storage_class]
#fog options
@fog_directory = hash[:fog_directory]
@@ -1,5 +1,8 @@
+require 'uri'
+
module Paperclip
module Storage
+
# Amazon's S3 file hosting service is a scalable, easy place to store files for
# distribution. You can find out more about it at http://aws.amazon.com/s3
# There are a few S3-specific options for has_attached_file:
@@ -67,12 +70,21 @@ module Storage
# S3 (strictly speaking) does not support directories, you can still use a / to
# separate parts of your file name.
# * +s3_host_name+: If your bucket lives in a region other than the US default (e.g. Tokyo), set that region's S3 host name here.
+ # * +s3_metadata+: These key/value pairs will be stored with the
+ # object. This option works by prefixing each key with
+ # "x-amz-meta-" before sending it as a header on the object
+ # upload request.
+ # * +s3_storage_class+: If this option is set to
+ # <tt>:reduced_redundancy</tt>, the object will be stored using Reduced
+ # Redundancy Storage. RRS enables customers to reduce their
+ # costs by storing non-critical, reproducible data at lower
+ # levels of redundancy than Amazon S3's standard storage.
module S3
def self.extended base
begin
require 'aws/s3'
rescue LoadError => e
- e.message << " (You may need to install the aws-s3 gem)"
+ e.message << " (You may need to install the aws-sdk gem)"
raise e
end unless defined?(AWS::S3)
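Taken together, the options documented above might be used like this; a minimal sketch, assuming a hypothetical User model, bucket name, and credentials path:

    class User < ActiveRecord::Base
      has_attached_file :avatar,
        :storage          => :s3,
        :bucket           => "my-bucket",                      # hypothetical bucket
        :s3_credentials   => "#{Rails.root}/config/s3.yml",    # hypothetical path
        :s3_metadata      => { "uploaded-by" => "paperclip" }, # sent as x-amz-meta-uploaded-by
        :s3_storage_class => :reduced_redundancy               # opt in to RRS
    end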
@@ -83,7 +95,21 @@ def self.extended base
Proc.new do |style|
(@s3_permissions[style.to_sym] || @s3_permissions[:default]) == :public_read ? 'http' : 'https'
end
- @s3_headers = @options.s3_headers || {}
+
+ @s3_metadata = @options.s3_metadata || {}
+ @s3_headers = (@options.s3_headers || {}).inject({}) do |headers,(name,value)|
+ case name.to_s
+ when /^x-amz-meta-(.*)/i
+ @s3_metadata[$1.downcase] = value
+ else
+ name = name.to_s.downcase.sub(/^x-amz-/,'').tr("-","_").to_sym
+ headers[name] = value
+ end
+ headers
+ end
+
+ @s3_headers[:storage_class] = @options.s3_storage_class if @options.s3_storage_class
unless @options.url.to_s.match(/^:s3.*url$/) || @options.url == ":asset_host"
@options.path = @options.path.gsub(/:url/, @options.url).gsub(/^:rails_root\/public\/system/, '')
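To see what the inject above produces, the same normalization can be run standalone on a hypothetical s3_headers hash:

    s3_metadata = {}
    s3_headers = {
      "x-amz-meta-author"   => "jane",                # routed into metadata
      "X-Amz-Storage-Class" => "REDUCED_REDUNDANCY",  # x-amz- prefix stripped
      "Content-Disposition" => "attachment"
    }.inject({}) do |headers, (name, value)|
      case name.to_s
      when /^x-amz-meta-(.*)/i
        s3_metadata[$1.downcase] = value
      else
        headers[name.to_s.downcase.sub(/^x-amz-/, '').tr("-", "_").to_sym] = value
      end
      headers
    end
    s3_headers  # => { :storage_class => "REDUCED_REDUNDANCY", :content_disposition => "attachment" }
    s3_metadata # => { "author" => "jane" }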
@@ -92,14 +118,7 @@ def self.extended base
@options.url = @options.url.inspect if @options.url.is_a?(Symbol)
@http_proxy = @options.http_proxy || nil
- if @http_proxy
- @s3_options.merge!({:proxy => @http_proxy})
- end
- AWS::S3::Base.establish_connection!( @s3_options.merge(
- :access_key_id => s3_credentials[:access_key_id],
- :secret_access_key => s3_credentials[:secret_access_key]
- ))
end
Paperclip.interpolates(:s3_alias_url) do |attachment, style|
"#{attachment.s3_protocol(style)}://#{attachment.s3_host_alias}/#{attachment.path(style).gsub(%r{^/}, "")}"
@@ -116,7 +135,10 @@ def self.extended base
end
def expiring_url(time = 3600, style_name = default_style)
- AWS::S3::S3Object.url_for(path(style_name), bucket_name, :expires_in => time, :use_ssl => (s3_protocol(style_name) == 'https'))
+ s3_object(style_name).url_for(:read,
+ :expires => time,
+ :secure => (s3_protocol(style_name) == 'https')).to_s
end
def s3_credentials
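The caller's side is unchanged by the port; with aws-sdk the signing is done by url_for (attachment name hypothetical):

    user.avatar.expiring_url              # signed URL, valid for 3600 seconds
    user.avatar.expiring_url(300, :thumb) # five-minute URL for the :thumb style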
@@ -136,7 +158,43 @@ def s3_host_alias
def bucket_name
@bucket = @options.bucket || s3_credentials[:bucket]
@bucket = @bucket.call(self) if @bucket.is_a?(Proc)
- @bucket
+ @bucket or raise ArgumentError, "missing required :bucket option"
+ end
+
+ def s3_interface
+ @s3_interface ||= begin
+
+ config = {}
+
+ config[:s3_endpoint] = s3_host_name
+
+ if using_http_proxy?
+ proxy_opts = {}
+ proxy_opts[:host] = http_proxy_host
+ proxy_opts[:port] = http_proxy_port if http_proxy_port
+ if http_proxy_user
+ userinfo = http_proxy_user.to_s
+ userinfo += ":#{http_proxy_password}" if http_proxy_password
+ proxy_opts[:userinfo] = userinfo
+ end
+ config[:proxy_uri] = URI::HTTP.build(proxy_opts)
+ end
+
+ [:access_key_id, :secret_access_key].each do |opt|
+ config[opt] = s3_credentials[opt] if s3_credentials[opt]
+ end
+
+ AWS::S3.new(config.merge(@s3_options))
+
+ end
+ end
+
+ def s3_bucket
+ @s3_bucket ||= s3_interface.buckets[bucket_name]
+ end
+
+ def s3_object style_name = default_style
+ s3_bucket.objects[path(style_name).sub(%r{^/},'')]
end
def using_http_proxy?
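As a sanity check on the proxy branch above, URI::HTTP.build assembles the parts like so (host, port, and userinfo hypothetical):

    require 'uri'

    URI::HTTP.build(:host => "proxy.example.com",
                    :port => 8080,
                    :userinfo => "user:secret").to_s
    # => "http://user:secret@proxy.example.com:8080"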
@@ -176,7 +234,7 @@ def parse_credentials creds
def exists?(style = default_style)
if original_filename
- AWS::S3::S3Object.exists?(path(style), bucket_name)
+ s3_object(style).exists?
else
false
end
@@ -199,30 +257,29 @@ def to_file style = default_style
basename = File.basename(filename, extname)
file = Tempfile.new([basename, extname])
file.binmode
- file.write(AWS::S3::S3Object.value(path(style), bucket_name))
+ file.write(s3_object(style).read)
file.rewind
return file
end
def create_bucket
- AWS::S3::Bucket.create(bucket_name)
+ s3_interface.buckets.create(bucket_name)
end
def flush_writes #:nodoc:
@queued_for_write.each do |style, file|
begin
log("saving #{path(style)}")
- AWS::S3::S3Object.store(path(style),
- file,
- bucket_name,
- {:content_type => file.content_type.to_s.strip,
- :access => (@s3_permissions[style] || @s3_permissions[:default]),
- }.merge(@s3_headers))
- rescue AWS::S3::NoSuchBucket => e
+ write_options = {
+ :content_type => file.content_type.to_s.strip,
+ :acl => (@s3_permissions[style] || @s3_permissions[:default]),
+ }
+ write_options[:metadata] = @s3_metadata unless @s3_metadata.empty?
+ write_options.merge!(@s3_headers)
+ s3_object(style).write(file, write_options)
+ rescue AWS::S3::Errors::NoSuchBucket => e
create_bucket
retry
- rescue AWS::S3::ResponseError => e
- raise
end
end
@@ -235,8 +292,8 @@ def flush_deletes #:nodoc:
@queued_for_delete.each do |path|
begin
log("deleting #{path}")
- AWS::S3::S3Object.delete(path, bucket_name)
- rescue AWS::S3::ResponseError
+ s3_bucket.objects[path.sub(%r{^/},'')].delete
+ rescue AWS::Errors::Base => e
# Ignore this.
end
end
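The whole port rests on a handful of aws-sdk v1 calls; a condensed sketch, with hypothetical credentials, bucket, and key:

    require 'aws-sdk'

    s3     = AWS::S3.new(:access_key_id => "AKIA...", :secret_access_key => "...")
    object = s3.buckets["my-bucket"].objects["avatars/1/original/5k.png"]

    object.write(File.open("5k.png", "rb"),
                 :content_type => "image/png",
                 :acl          => :public_read,
                 :metadata     => { "uploaded-by" => "paperclip" })
    object.exists?                                                 # => true
    object.url_for(:read, :expires => 3600, :secure => true).to_s # signed GET URL
    object.read                                                    # object body as a string
    object.delete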