Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

Split storage.rb into storage/filesystem.rb and storage/s3.rb.

  • Loading branch information...
commit c5b6595b5667e7ff107c4175a5f1553c68c27056 1 parent 477e3a9
@postmodern postmodern authored solnic committed
View
4 dm-paperclip.gemspec
@@ -6,10 +6,10 @@ Gem::Specification.new do |s|
s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
s.authors = ["Ken Robertson"]
- s.date = %q{2010-09-20}
+ s.date = %q{2010-10-09}
s.email = %q{ken@invalidlogic.com}
s.extra_rdoc_files = ["README.rdoc"]
- s.files = ["README.rdoc", "LICENSE", "Rakefile", "init.rb", "lib/dm-paperclip.rb", "lib/dm-paperclip", "lib/dm-paperclip/geometry.rb", "lib/dm-paperclip/attachment.rb", "lib/dm-paperclip/storage.rb", "lib/dm-paperclip/iostream.rb", "lib/dm-paperclip/upfile.rb", "lib/dm-paperclip/callback_compatability.rb", "lib/dm-paperclip/interpolations.rb", "lib/dm-paperclip/validations.rb", "lib/dm-paperclip/thumbnail.rb", "lib/dm-paperclip/processor.rb", "tasks/paperclip_tasks.rake", "test/iostream_test.rb", "test/attachment_test.rb", "test/storage_test.rb", "test/thumbnail_test.rb", "test/integration_test.rb", "test/geometry_test.rb", "test/paperclip_test.rb", "test/fixtures", "test/fixtures/s3.yml", "test/fixtures/12k.png", "test/fixtures/text.txt", "test/fixtures/bad.png", "test/fixtures/50x50.png", "test/fixtures/5k.png", "test/helper.rb"]
+ s.files = ["README.rdoc", "LICENSE", "Rakefile", "init.rb", "lib/dm-paperclip.rb", "lib/dm-paperclip", "lib/dm-paperclip/validations.rb", "lib/dm-paperclip/geometry.rb", "lib/dm-paperclip/storage", "lib/dm-paperclip/storage/s3.rb", "lib/dm-paperclip/storage/filesystem.rb", "lib/dm-paperclip/iostream.rb", "lib/dm-paperclip/storage.rb", "lib/dm-paperclip/thumbnail.rb", "lib/dm-paperclip/attachment.rb", "lib/dm-paperclip/upfile.rb", "lib/dm-paperclip/callback_compatability.rb", "lib/dm-paperclip/processor.rb", "lib/dm-paperclip/interpolations.rb", "tasks/paperclip_tasks.rake", "test/paperclip_test.rb", "test/attachment_test.rb", "test/geometry_test.rb", "test/integration_test.rb", "test/iostream_test.rb", "test/fixtures", "test/fixtures/5k.png", "test/fixtures/bad.png", "test/fixtures/s3.yml", "test/fixtures/50x50.png", "test/fixtures/text.txt", "test/fixtures/12k.png", "test/storage_test.rb", "test/helper.rb", "test/thumbnail_test.rb"]
s.homepage = %q{http://invalidlogic.com/dm-paperclip/}
s.rdoc_options = ["--line-numbers", "--inline-source"]
s.require_paths = ["lib"]
View
292 lib/dm-paperclip/storage.rb
@@ -1,290 +1,2 @@
-module Paperclip
- module Storage
-
- # The default place to store attachments is in the filesystem. Files on
- # the local filesystem can be very easily served by Apache without
- # requiring a hit to your app. They also can be processed more easily
- # after they've been saved, as they're just normal files. There is one
- # Filesystem-specific option for has_attached_file.
- # * +path+: The location of the repository of attachments on disk. This
- # can (and, in almost all cases, should) be coordinated with the
- # value of the +url+ option to allow files to be saved into a place
- # where Apache can serve them without hitting your app. Defaults to:
- # ":rails_root/public/:attachment/:id/:style/:basename.:extension"
- # By default this places the files in the app's public directory
- # which can be served directly. If you are using capistrano for
- # deployment, a good idea would be to make a symlink to the
- # capistrano-created system directory from inside your app's public
- # directory. See Paperclip::Attachment#interpolate for more
- # information on variable interpolaton.
- # :path => "/var/app/attachments/:class/:id/:style/:basename.:extension"
- module Filesystem
- def self.extended(base)
- end
-
- def exists?(style_name = default_style)
- if original_filename
- File.exist?(path(style_name))
- else
- false
- end
- end
-
- # Returns representation of the data of the file assigned to the given
- # style, in the format most representative of the current storage.
- def to_file(style_name = default_style)
- @queued_for_write[style_name] || (File.new(path(style_name), 'rb') if exists?(style_name))
- end
-
- def flush_writes #:nodoc:
- @queued_for_write.each do |style_name, file|
- file.close
- FileUtils.mkdir_p(File.dirname(path(style_name)))
- log("saving #{path(style_name)}")
- FileUtils.mv(file.path, path(style_name))
- FileUtils.chmod(0644, path(style_name))
- end
- @queued_for_write = {}
- end
-
- def flush_deletes #:nodoc:
- @queued_for_delete.each do |path|
- begin
- log("deleting #{path}")
- FileUtils.rm(path) if File.exist?(path)
- rescue Errno::ENOENT => e
- # ignore file-not-found, let everything else pass
- end
- begin
- while(true)
- path = File.dirname(path)
- FileUtils.rmdir(path)
- end
- rescue Errno::EEXIST, Errno::ENOTEMPTY, Errno::ENOENT, Errno::EINVAL, Errno::ENOTDIR
- # Stop trying to remove parent directories
- rescue SystemCallError => e
- log("There was an unexpected error while deleting directories: #{e.class}")
- # Ignore it
- end
- end
- @queued_for_delete = []
- end
- end
-
- # Amazon's S3 file hosting service is a scalable, easy place to store
- # files for distribution. You can find out more about it at
- # http://aws.amazon.com/s3. There are a few S3-specific options for
- # +has_attached_file+:
- # * +s3_credentials+: Takes a path, a File, or a Hash. The path
- # (or File) must point to a YAML file containing the
- # +access_key_id+ and +secret_access_key+ that Amazon gives you.
- # You can 'environment-space' this just like you do to your
- # +database.yml+ file, so different environments can use different
- # accounts:
- #
- # development:
- # access_key_id: 123...
- # secret_access_key: 123...
- # test:
- # access_key_id: abc...
- # secret_access_key: abc...
- # production:
- # access_key_id: 456...
- # secret_access_key: 456...
- #
- # This is not required, however, and the file may simply look like
- # this:
- #
- # access_key_id: 456...
- # secret_access_key: 456...
- #
- # In which case, those access keys will be used in all environments.
- # You can also put your bucket name in this file, instead of adding
- # it to the code directly. This is useful when you want the same
- # account but a different bucket for development versus production.
- # * +s3_permissions+: This is a String that should be one of the
- # "canned" access policies that S3 provides (more information can be
- # found here: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html#RESTCannedAccessPolicies).
- # The default for Paperclip is +:public_read+.
- # * +s3_protocol+: The protocol for the URLs generated to your S3
- # assets. Can be either 'http' or 'https'. Defaults to 'http' when
- # your +:s3_permissions+ are +:public_read+ (the default), and 'https'
- # when your +:s3_permissions+ are anything else.
- # * +s3_headers+: A hash of headers such as:
- #
- # {'Expires' => 1.year.from_now.httpdate}
- #
- # * +bucket+: This is the name of the S3 bucket that will store your
- # files. Remember that the bucket must be unique across all of
- # Amazon S3. If the bucket does not exist Paperclip will attempt to
- # create it. The bucket name will not be interpolated. You can define
- # the bucket as a Proc if you want to determine it's name at runtime.
- # Paperclip will call that Proc with attachment as the only argument.
- # * +s3_host_alias+: The fully-qualified domain name (FQDN) that is the
- # alias to the S3 domain of your bucket. Used with the +:s3_alias_url+
- # url interpolation. See the link in the +url+ entry for more
- # information about S3 domains and buckets.
- # * +url+: There are three options for the S3 url. You can choose to
- # have the bucket's name placed domain-style
- # (bucket.s3.amazonaws.com) or path-style (s3.amazonaws.com/bucket).
- # Lastly, you can specify a CNAME (which requires the CNAME to be
- # specified as +:s3_alias_url+. You can read more about CNAMEs and S3
- # at http://docs.amazonwebservices.com/AmazonS3/latest/index.html?VirtualHosting.html.
- # Normally, this won't matter in the slightest and you can leave the
- # default (which is path-style, or +:s3_path_url+). But in some cases
- # paths don't work and you need to use the domain-style
- # (+:s3_domain_url+). Anything else here will be treated like
- # path-style.
- # NOTE: If you use a CNAME for use with CloudFront, you can NOT
- # specify https as your +:s3_protocol+; This is *not supported* by
- # S3/CloudFront. Finally, when using the host alias, the +:bucket+
- # parameter is ignored, as the hostname is used as the bucket name
- # by S3.
- # * +path+: This is the key under the bucket in which the file will be
- # stored. The URL will be constructed from the bucket and the path.
- # This is what you will want to interpolate. Keys should be unique,
- # like filenames, and despite the fact that S3 (strictly speaking)
- # does not support directories, you can still use a / to separate
- # parts of your file name.
- module S3
- def self.extended(base)
- begin
- require 'aws/s3'
- rescue LoadError => e
- e.message << ' (You may need to install the aws-s3 gem)'
- raise e
- end
-
- base.instance_eval do
- @s3_credentials = parse_credentials(@options[:s3_credentials])
- @bucket = (@options[:bucket] || @s3_credentials[:bucket])
- @bucket = @bucket.call(self) if @bucket.is_a?(Proc)
- @s3_options = (@options[:s3_options] || {})
- @s3_permissions = (@options[:s3_permissions] || :public_read)
- @s3_protocol = (@options[:s3_protocol] || (@s3_permissions == :public_read ? 'http' : 'https'))
- @s3_headers = (@options[:s3_headers] || {})
- @s3_host_alias = @options[:s3_host_alias]
- @url = ':s3_path_url' unless @url.to_s.match(/^:s3.*url$/)
- AWS::S3::Base.establish_connection!( @s3_options.merge(
- :access_key_id => @s3_credentials[:access_key_id],
- :secret_access_key => @s3_credentials[:secret_access_key]
- ))
- end
-
- Paperclip.interpolates(:s3_alias_url) do |attachment, style|
- "#{attachment.s3_protocol}://#{attachment.s3_host_alias}/#{attachment.path(style).gsub(%r{^/}, '')}"
- end
-
- Paperclip.interpolates(:s3_path_url) do |attachment, style|
- "#{attachment.s3_protocol}://s3.amazonaws.com/#{attachment.bucket_name}/#{attachment.path(style).gsub(%r{^/}, '')}"
- end
-
- Paperclip.interpolates(:s3_domain_url) do |attachment, style|
- "#{attachment.s3_protocol}://#{attachment.bucket_name}.s3.amazonaws.com/#{attachment.path(style).gsub(%r{^/}, '')}"
- end
- end
-
- def expiring_url(time = 3600)
- AWS::S3::S3Object.url_for(path, bucket_name, :expires_in => time )
- end
-
- def bucket_name
- @bucket
- end
-
- def s3_host_alias
- @s3_host_alias
- end
-
- def parse_credentials(creds)
- creds = find_credentials(creds).to_mash.stringify_keys!
- if defined?(Merb) && Merb.respond_to?(:env)
- (creds[Merb.env] || creds).symbolize_keys
- elsif defined?(RAILS_ENV)
- (creds[RAILS_ENV] || creds).symbolize_keys
- elsif defined?(Rails) && Rails.respond_to(:env)
- (creds[Rails.env] || creds).symbolize_keys
- elsif defined?(RACK_ENV)
- (creds[RACK_ENV] || creds).symbolize_keys
- else
- creds.symbolize_keys
- end
- end
-
- def exists?(style = default_style)
- if original_filename
- AWS::S3::S3Object.exists?(path(style), bucket_name)
- else
- false
- end
- end
-
- def s3_protocol
- @s3_protocol
- end
-
- # Returns representation of the data of the file assigned to the given
- # style, in the format most representative of the current storage.
- def to_file(style = default_style)
- return @queued_for_write[style] if @queued_for_write[style]
-
- file = Tempfile.new(path(style))
- file.write(AWS::S3::S3Object.value(path(style), bucket_name))
- file.rewind
- return file
- end
-
- def flush_writes #:nodoc:
- @queued_for_write.each do |style, file|
- begin
- log("saving #{path(style)}")
-
- AWS::S3::S3Object.store(
- path(style),
- file,
- bucket_name,
- {
- :content_type => instance_read(:content_type),
- :access => @s3_permissions,
- }.merge(@s3_headers)
- )
- rescue AWS::S3::ResponseError => e
- raise
- end
- end
-
- @queued_for_write = {}
- end
-
- def flush_deletes #:nodoc:
- @queued_for_delete.each do |path|
- begin
- log("deleting #{path}")
-
- AWS::S3::S3Object.delete(path, bucket_name)
- rescue AWS::S3::ResponseError
- # Ignore this.
- end
- end
-
- @queued_for_delete = []
- end
-
- def find_credentials(creds)
- case creds
- when File
- YAML::load(ERB.new(File.read(creds.path)).result)
- when Pathname, String
- YAML::load(ERB.new(File.read(creds)).result)
- when Hash
- creds
- else
- raise ArgumentError, 'Credentials are not a path, file, or hash.'
- end
- end
-
- private :find_credentials
-
- end
- end
-end
+require 'dm-paperclip/storage/filesystem'
+require 'dm-paperclip/storage/s3'
View
73 lib/dm-paperclip/storage/filesystem.rb
@@ -0,0 +1,73 @@
module Paperclip
  module Storage
    # Filesystem storage is the default place attachments are kept. Files on
    # the local disk can be served directly by Apache without a hit to your
    # app, and are easy to post-process since they are ordinary files. One
    # Filesystem-specific option is recognized by has_attached_file:
    # * +path+: Where the repository of attachments lives on disk. In almost
    #   all cases this should be coordinated with the +url+ option so that
    #   files land somewhere Apache can serve them without involving your
    #   app. Defaults to:
    #   ":rails_root/public/:attachment/:id/:style/:basename.:extension"
    #   which puts files under the app's public directory. With capistrano
    #   deployments, consider symlinking the capistrano-created system
    #   directory into your app's public directory. See
    #   Paperclip::Attachment#interpolate for details on variable
    #   interpolation.
    #   :path => "/var/app/attachments/:class/:id/:style/:basename.:extension"
    module Filesystem
      def self.extended(base)
      end

      # Whether a file for the given style has been written to disk.
      # Returns false when no file has ever been assigned.
      def exists?(style_name = default_style)
        original_filename ? File.exist?(path(style_name)) : false
      end

      # Returns representation of the data of the file assigned to the given
      # style, in the format most representative of the current storage:
      # the pending in-memory file if one is queued, otherwise the on-disk
      # file opened for binary reading (nil when nothing exists).
      def to_file(style_name = default_style)
        pending = @queued_for_write[style_name]
        return pending if pending

        File.new(path(style_name), 'rb') if exists?(style_name)
      end

      def flush_writes #:nodoc:
        @queued_for_write.each do |style_name, file|
          file.close
          destination = path(style_name)
          FileUtils.mkdir_p(File.dirname(destination))
          log("saving #{destination}")
          FileUtils.mv(file.path, destination)
          FileUtils.chmod(0644, destination)
        end
        @queued_for_write = {}
      end

      def flush_deletes #:nodoc:
        @queued_for_delete.each do |path|
          begin
            log("deleting #{path}")
            FileUtils.rm(path) if File.exist?(path)
          rescue Errno::ENOENT
            # ignore file-not-found, let everything else pass
          end
          begin
            # Climb the directory tree, pruning now-empty parents.
            loop do
              path = File.dirname(path)
              FileUtils.rmdir(path)
            end
          rescue Errno::EEXIST, Errno::ENOTEMPTY, Errno::ENOENT, Errno::EINVAL, Errno::ENOTDIR
            # Stop trying to remove parent directories
          rescue SystemCallError => e
            log("There was an unexpected error while deleting directories: #{e.class}")
            # Ignore it
          end
        end
        @queued_for_delete = []
      end
    end
  end
end
View
219 lib/dm-paperclip/storage/s3.rb
@@ -0,0 +1,219 @@
module Paperclip
  module Storage
    # Amazon's S3 file hosting service is a scalable, easy place to store
    # files for distribution. You can find out more about it at
    # http://aws.amazon.com/s3. There are a few S3-specific options for
    # +has_attached_file+:
    # * +s3_credentials+: Takes a path, a File, or a Hash. The path
    #   (or File) must point to a YAML file containing the
    #   +access_key_id+ and +secret_access_key+ that Amazon gives you.
    #   You can 'environment-space' this just like you do to your
    #   +database.yml+ file, so different environments can use different
    #   accounts:
    #
    #     development:
    #       access_key_id: 123...
    #       secret_access_key: 123...
    #     test:
    #       access_key_id: abc...
    #       secret_access_key: abc...
    #     production:
    #       access_key_id: 456...
    #       secret_access_key: 456...
    #
    #   This is not required, however, and the file may simply look like
    #   this:
    #
    #     access_key_id: 456...
    #     secret_access_key: 456...
    #
    #   In which case, those access keys will be used in all environments.
    #   You can also put your bucket name in this file, instead of adding
    #   it to the code directly. This is useful when you want the same
    #   account but a different bucket for development versus production.
    # * +s3_permissions+: This is a String that should be one of the
    #   "canned" access policies that S3 provides (more information can be
    #   found here: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html#RESTCannedAccessPolicies).
    #   The default for Paperclip is +:public_read+.
    # * +s3_protocol+: The protocol for the URLs generated to your S3
    #   assets. Can be either 'http' or 'https'. Defaults to 'http' when
    #   your +:s3_permissions+ are +:public_read+ (the default), and 'https'
    #   when your +:s3_permissions+ are anything else.
    # * +s3_headers+: A hash of headers such as:
    #
    #     {'Expires' => 1.year.from_now.httpdate}
    #
    # * +bucket+: This is the name of the S3 bucket that will store your
    #   files. Remember that the bucket must be unique across all of
    #   Amazon S3. If the bucket does not exist Paperclip will attempt to
    #   create it. The bucket name will not be interpolated. You can define
    #   the bucket as a Proc if you want to determine its name at runtime.
    #   Paperclip will call that Proc with attachment as the only argument.
    # * +s3_host_alias+: The fully-qualified domain name (FQDN) that is the
    #   alias to the S3 domain of your bucket. Used with the +:s3_alias_url+
    #   url interpolation. See the link in the +url+ entry for more
    #   information about S3 domains and buckets.
    # * +url+: There are three options for the S3 url. You can choose to
    #   have the bucket's name placed domain-style
    #   (bucket.s3.amazonaws.com) or path-style (s3.amazonaws.com/bucket).
    #   Lastly, you can specify a CNAME (which requires the CNAME to be
    #   specified as +:s3_alias_url+). You can read more about CNAMEs and S3
    #   at http://docs.amazonwebservices.com/AmazonS3/latest/index.html?VirtualHosting.html.
    #   Normally, this won't matter in the slightest and you can leave the
    #   default (which is path-style, or +:s3_path_url+). But in some cases
    #   paths don't work and you need to use the domain-style
    #   (+:s3_domain_url+). Anything else here will be treated like
    #   path-style.
    #   NOTE: If you use a CNAME for use with CloudFront, you can NOT
    #   specify https as your +:s3_protocol+; This is *not supported* by
    #   S3/CloudFront. Finally, when using the host alias, the +:bucket+
    #   parameter is ignored, as the hostname is used as the bucket name
    #   by S3.
    # * +path+: This is the key under the bucket in which the file will be
    #   stored. The URL will be constructed from the bucket and the path.
    #   This is what you will want to interpolate. Keys should be unique,
    #   like filenames, and despite the fact that S3 (strictly speaking)
    #   does not support directories, you can still use a / to separate
    #   parts of your file name.
    module S3
      # Lazily loads the aws-s3 gem, reads attachment options into ivars,
      # opens the S3 connection, and registers the S3 url interpolations.
      def self.extended(base)
        begin
          require 'aws/s3'
        rescue LoadError => e
          e.message << ' (You may need to install the aws-s3 gem)'
          raise e
        end

        base.instance_eval do
          @s3_credentials = parse_credentials(@options[:s3_credentials])
          @bucket = (@options[:bucket] || @s3_credentials[:bucket])
          @bucket = @bucket.call(self) if @bucket.is_a?(Proc)
          @s3_options = (@options[:s3_options] || {})
          @s3_permissions = (@options[:s3_permissions] || :public_read)
          @s3_protocol = (@options[:s3_protocol] || (@s3_permissions == :public_read ? 'http' : 'https'))
          @s3_headers = (@options[:s3_headers] || {})
          @s3_host_alias = @options[:s3_host_alias]
          # Force a recognized S3 url style unless one was already chosen.
          @url = ':s3_path_url' unless @url.to_s.match(/^:s3.*url$/)
          AWS::S3::Base.establish_connection!( @s3_options.merge(
            :access_key_id => @s3_credentials[:access_key_id],
            :secret_access_key => @s3_credentials[:secret_access_key]
          ))
        end

        Paperclip.interpolates(:s3_alias_url) do |attachment, style|
          "#{attachment.s3_protocol}://#{attachment.s3_host_alias}/#{attachment.path(style).gsub(%r{^/}, '')}"
        end

        Paperclip.interpolates(:s3_path_url) do |attachment, style|
          "#{attachment.s3_protocol}://s3.amazonaws.com/#{attachment.bucket_name}/#{attachment.path(style).gsub(%r{^/}, '')}"
        end

        Paperclip.interpolates(:s3_domain_url) do |attachment, style|
          "#{attachment.s3_protocol}://#{attachment.bucket_name}.s3.amazonaws.com/#{attachment.path(style).gsub(%r{^/}, '')}"
        end
      end

      # A signed, time-limited URL for the attachment's default path.
      # +time+ is the lifetime in seconds (default one hour).
      def expiring_url(time = 3600)
        AWS::S3::S3Object.url_for(path, bucket_name, :expires_in => time)
      end

      # The resolved S3 bucket name (Procs have already been called).
      def bucket_name
        @bucket
      end

      # The FQDN used by the +:s3_alias_url+ interpolation, if configured.
      def s3_host_alias
        @s3_host_alias
      end

      # Loads credentials and scopes them by the current framework
      # environment (Merb, legacy Rails RAILS_ENV, modern Rails, or Rack),
      # falling back to the flat hash when no environment key matches.
      # NOTE(review): to_mash/stringify_keys!/symbolize_keys come from
      # extlib/dm-core, not the Ruby standard library.
      def parse_credentials(creds)
        creds = find_credentials(creds).to_mash.stringify_keys!
        if defined?(Merb) && Merb.respond_to?(:env)
          (creds[Merb.env] || creds).symbolize_keys
        elsif defined?(RAILS_ENV)
          (creds[RAILS_ENV] || creds).symbolize_keys
        elsif defined?(Rails) && Rails.respond_to?(:env)
          # BUGFIX: was `Rails.respond_to(:env)` (missing `?`), which raised
          # NoMethodError whenever this branch was reached.
          (creds[Rails.env] || creds).symbolize_keys
        elsif defined?(RACK_ENV)
          (creds[RACK_ENV] || creds).symbolize_keys
        else
          creds.symbolize_keys
        end
      end

      # Whether an object exists in the bucket for the given style.
      # Returns false when no file has ever been assigned.
      def exists?(style = default_style)
        if original_filename
          AWS::S3::S3Object.exists?(path(style), bucket_name)
        else
          false
        end
      end

      # 'http' or 'https', as resolved from the attachment options.
      def s3_protocol
        @s3_protocol
      end

      # Returns representation of the data of the file assigned to the given
      # style, in the format most representative of the current storage:
      # the pending in-memory file if one is queued, otherwise a Tempfile
      # filled with the object's data downloaded from S3.
      def to_file(style = default_style)
        return @queued_for_write[style] if @queued_for_write[style]

        file = Tempfile.new(path(style))
        file.write(AWS::S3::S3Object.value(path(style), bucket_name))
        file.rewind
        return file
      end

      def flush_writes #:nodoc:
        @queued_for_write.each do |style, file|
          begin
            log("saving #{path(style)}")

            AWS::S3::S3Object.store(
              path(style),
              file,
              bucket_name,
              {
                :content_type => instance_read(:content_type),
                :access => @s3_permissions,
              }.merge(@s3_headers)
            )
          rescue AWS::S3::ResponseError => e
            raise
          end
        end

        @queued_for_write = {}
      end

      def flush_deletes #:nodoc:
        @queued_for_delete.each do |path|
          begin
            log("deleting #{path}")

            AWS::S3::S3Object.delete(path, bucket_name)
          rescue AWS::S3::ResponseError
            # Ignore this.
          end
        end

        @queued_for_delete = []
      end

      # Normalizes the +s3_credentials+ option into a Hash: a File or
      # path/Pathname is parsed as ERB-templated YAML; a Hash is returned
      # as-is; anything else raises ArgumentError.
      def find_credentials(creds)
        case creds
        when File
          YAML::load(ERB.new(File.read(creds.path)).result)
        when Pathname, String
          YAML::load(ERB.new(File.read(creds)).result)
        when Hash
          creds
        else
          raise ArgumentError, 'Credentials are not a path, file, or hash.'
        end
      end

      private :find_credentials

    end
  end
end
Please sign in to comment.
Something went wrong with that request. Please try again.