Permalink
Browse files

change resource pool to use Fog gem

The Fog gem provides all the indirection for local storage (over nfs) or s3
access and replaces the old FilesystemPool.

Change-Id: Ia5ede90e8fd6cc9729eb87464d1cc0017f2d1c50
  • Loading branch information...
1 parent 9b66f53 commit c0bbedd991facd1868179909e3dc5cb17156fd1d @pbozeman pbozeman committed with Glenn Oppegard & Patrick Bozeman Jan 28, 2013
View
@@ -3,6 +3,7 @@ source :rubygems
gem "rake"
gem "bcrypt-ruby"
gem 'eventmachine', "~> 1.0.0"
+gem 'fog'
gem "redis"
gem "rfc822"
gem "sequel"
View
@@ -56,6 +56,18 @@ GEM
em-socksify (0.2.1)
eventmachine (>= 1.0.0.beta.4)
eventmachine (1.0.0)
+ excon (0.16.10)
+ fog (1.9.0)
+ builder
+ excon (~> 0.14)
+ formatador (~> 0.2.0)
+ mime-types
+ multi_json (~> 1.0)
+ net-scp (~> 1.0.4)
+ net-ssh (>= 2.1.3)
+ nokogiri (~> 1.5.0)
+ ruby-hmac
+ formatador (0.2.4)
grape (0.2.2)
activesupport
hashie (~> 1.2)
@@ -87,6 +99,10 @@ GEM
eventmachine (>= 0.12.10)
json_pure (>= 1.7.3)
thin (>= 1.3.1)
+ net-scp (1.0.4)
+ net-ssh (>= 1.99.1)
+ net-ssh (2.6.3)
+ nokogiri (1.5.6)
pg (0.13.2)
posix-spawn (0.3.6)
rack (1.4.1)
@@ -110,6 +126,7 @@ GEM
diff-lcs (~> 1.1.3)
rspec-mocks (2.10.1)
ruby-graphviz (1.0.5)
+ ruby-hmac (0.4.0)
sequel (3.40.0)
simplecov (0.6.4)
multi_json (~> 1.0)
@@ -153,6 +170,7 @@ DEPENDENCIES
cf-uaa-lib (~> 1.3.0)
ci_reporter
eventmachine (~> 1.0.0)
+ fog
guard-rspec
httpclient
machinist (~> 1.0.6)
@@ -80,3 +80,9 @@ quota_definitions:
paid_memory_limit: 204800 # 200 GB
default_quota_definition: free
+
+resource_pool:
+ resource_directory_key: cc-resources
+ fog_connection:
+ provider: Local
+ local_root: /tmp
View
@@ -124,8 +124,7 @@ def validate_scheme(user)
require "cloud_controller/legacy_api/legacy_resources"
require "cloud_controller/legacy_api/legacy_users"
-require "cloud_controller/resource_pool/resource_pool"
-require "cloud_controller/resource_pool/filesystem_pool"
+require "cloud_controller/resource_pool"
require "cloud_controller/dea/dea_pool"
require "cloud_controller/dea/dea_client"
@@ -7,7 +7,7 @@ def match
return NotAuthorized unless user
# TODO: replace with json_message
descriptors = Yajl::Parser.parse(body)
- matched = FilesystemPool.match_resources(descriptors)
+ matched = ResourcePool.match_resources(descriptors)
Yajl::Encoder.encode(matched)
end
@@ -12,7 +12,6 @@ class << self
def configure(config = {})
@config = config.dup
@max_droplet_size = @config[:max_droplet_size] || 512 * 1024 * 1024
- @resource_pool = FilesystemPool
end
# Collects the necessary files and returns the sha1 of the resulting
@@ -90,7 +89,7 @@ def validate_package_size(uploaded_file, resources)
# Ugh, this stat's all the files that would need to be copied
# from the resource pool. Consider caching sizes in resource pool?
- sizes = resource_pool.resource_sizes(resources)
+ sizes = ResourcePool.resource_sizes(resources)
total_size += sizes.reduce(0) {|accum, cur| accum + cur["size"] }
validate_size(total_size)
end
@@ -152,11 +151,11 @@ def resolve_path(working_dir, tainted_path)
# Do resource pool synch
def synchronize_pool_with(working_dir, resource_descriptors)
- resource_pool.add_directory(working_dir)
+ ResourcePool.add_directory(working_dir)
resource_descriptors.each do |descriptor|
create_dir_skeleton(working_dir, descriptor["fn"])
path = resolve_path(working_dir, descriptor["fn"])
- resource_pool.copy(descriptor, path)
+ ResourcePool.copy(descriptor, path)
end
rescue => e
logger.error "failed synchronizing resource pool with '#{working_dir}' #{e}"
@@ -186,7 +185,7 @@ def logger
@logger ||= Steno.logger("cc.ap")
end
- attr_accessor :max_droplet_size, :resource_pool, :droplets_dir
+ attr_accessor :max_droplet_size, :droplets_dir
end
end
end
@@ -40,7 +40,6 @@ class VCAP::CloudController::Config < VCAP::Config
optional(:directories) => {
optional(:tmpdir) => String,
optional(:droplets) => String,
- optional(:resources) => String,
optional(:staging_manifests) => String,
},
@@ -93,6 +92,17 @@ class VCAP::CloudController::Config < VCAP::Config
:quota_definitions => Hash,
:default_quota_definition => String,
+
+ :resource_pool => {
+ optional(:maximum_size) => Integer,
+ optional(:resource_directory_key) => String,
+ :fog_connection => {
+ :provider => String,
+ optional(:aws_access_key_id) => String,
+ optional(:aws_secret_access_key) => String,
+ optional(:local_root) => String
+ }
+ }
}
end
@@ -116,7 +126,6 @@ def self.configure(config)
VCAP::CloudController::MessageBus.configure(config)
VCAP::CloudController::AccountCapacity.configure(config)
VCAP::CloudController::ResourcePool.configure(config)
- VCAP::CloudController::FilesystemPool.configure(config)
VCAP::CloudController::AppPackage.configure(config)
VCAP::CloudController::AppStager.configure(config)
VCAP::CloudController::LegacyStaging.configure(config)
@@ -1,42 +1,29 @@
# Copyright (c) 2009-2012 VMware, Inc.
#
-# This is pretty much a direct copy from the legacy cc.
-#
# A "resource" is typically represented as a Hash with two attributes:
# :size (bytes)
# :sha1 (string)
# If there are other attributes, such as in legacy calls to "match_resources",
# they will be ignored and preserved.
-#
-# See config/initializers/resource_pool.rb for where this is initialized
-# in production mode.
-# See spec/spec_helper.rb for the test initialization.
-#
-# TODO - Implement "Blob Store" subclass.
+require "fog"
require "steno"
class VCAP::CloudController::ResourcePool
-
class << self
attr_accessor :minimum_size, :maximum_size
def configure(config = {})
- # the old legacy code had a minimum requirement here that it set to 0.
- # but.. since all files bounce through the resource pool, that doesn't
- # make much sense.
opts = config[:resource_pool] || {}
+ @connection_config = opts[:fog_connection]
+ @resource_directory_key = opts[:resource_directory_key] || "cc-resources"
@maximum_size = opts[:maximum_size] || 512 * 1024 * 1024 # MB
end
def match_resources(descriptors)
descriptors.select { |h| resource_known?(h) }
end
- def resource_known?(descriptor)
- raise NotImplementedError, "Implemented in subclasses. See filesystem.rb for example."
- end
-
# Adds everything under source directory +dir+ to the resource pool.
def add_directory(dir)
unless File.exists?(dir) && File.directory?(dir)
@@ -53,18 +40,37 @@ def add_directory(dir)
end
end
- # Reads +path+ from the local disk and adds it to the pool, if needed.
def add_path(path)
- raise NotImplementedError, "Implement in each subclass"
+ # Reads +path+ from the local disk and adds it to the pool, if needed.
+ sha1 = Digest::SHA1.file(path).hexdigest
+ key = key_from_sha1(sha1)
+ # Dedup lookup must use the same sha1-derived key that files are
+ # stored under; checking head(sha1) never matches, so every file
+ # would be re-uploaded on each call.
+ return if resource_dir.files.head(key)
+
+ # Block form of File.open ensures the handle is closed after the
+ # upload instead of leaking until GC.
+ File.open(path) do |file|
+ resource_dir.files.create(
+ :key => key,
+ :body => file,
+ :public => false,
+ )
+ end
+ end
+
+ def resource_sizes(resources)
+ sizes = []
+ resources.each do |descriptor|
+ key = key_from_sha1(descriptor["sha1"])
+ if head = resource_dir.files.head(key)
+ entry = descriptor.dup
+ entry["size"] = head.content_length
+ sizes << entry
+ end
+ end
+ sizes
end
def copy(descriptor, destination)
- path = path_from_sha1(descriptor["sha1"])
if resource_known?(descriptor)
- logger.debug "resource pool sync #{descriptor} #{path}"
+ logger.debug "resource pool sync #{descriptor}"
overwrite_destination_with!(descriptor, destination)
else
- logger.warn "resource pool sync error #{descriptor} #{path}"
+ logger.warn "resource pool sync error #{descriptor}"
raise ArgumentError, "Can not copy bits we do not have #{descriptor}"
end
end
@@ -75,6 +81,11 @@ def logger
@logger ||= Steno.logger("cc.resource_pool")
end
+ def resource_known?(descriptor)
+ key = key_from_sha1(descriptor["sha1"])
+ resource_dir.files.head(key)
+ end
+
def resource_allowed?(path)
stat = File.stat(path)
File.file?(path) && !stat.symlink? && stat.size < maximum_size
@@ -83,7 +94,28 @@ def resource_allowed?(path)
# Called after we sanity-check the input.
# Create a new path on disk containing the resource described by +descriptor+
def overwrite_destination_with!(descriptor, destination)
- raise NotImplementedError, "Implemented in subclasses. See filesystem_pool for example."
+ FileUtils.mkdir_p File.dirname(destination)
+ s3_key = key_from_sha1(descriptor["sha1"])
+ s3_file = resource_dir.files.get(s3_key)
+ File.open(destination, "w") do |file|
+ file.write(s3_file.body)
+ end
+ end
+
+ def connection
+ Fog::Storage.new(@connection_config)
+ end
+
+ def resource_dir
+ @directory ||= connection.directories.create(
+ :key => @resource_directory_key,
+ :public => false,
+ )
+ end
+
+ def key_from_sha1(sha1)
+ sha1 = sha1.to_s.downcase
+ File.join(sha1[0..1], sha1[2..3], sha1)
end
end
end
@@ -1,74 +0,0 @@
-# Copyright (c) 2009-2012 VMware, Inc.
-#
-# This is pretty much a direct port from the legacy cc, with all the same
-# parameters so that legacy and ccng can share the same resource pool.
-
-class VCAP::CloudController::FilesystemPool < VCAP::CloudController::ResourcePool
- # These used to be configurable in the legacy cc, but, you really can't
- # change them once a resource pool is in place. That is not safe. Given
- # that we never configured these, lets just make them constants. We're going
- # to move away from file based resource pool to the vblob soon anyway.
- LEVELS = 2
- MODULOS = [269, 251]
-
- class << self
- attr_reader :directory, :levels, :modulos
-
- def configure(config = {})
- super
- unless @directory
- dir_config = config[:directories] || {}
- @directory = dir_config[:resources] || Dir.mktmpdir.to_s
- end
- @levels = LEVELS
- @modulos = MODULOS
- end
-
- def resource_known?(descriptor)
- resource_path = path_from_sha1(descriptor["sha1"])
- File.exists?(resource_path)
- end
-
- def add_path(path)
- file = File.stat(path)
- sha1 = Digest::SHA1.file(path).hexdigest
- resource_path = path_from_sha1(sha1)
- return if File.exists?(resource_path)
- FileUtils.mkdir_p File.dirname(resource_path)
- FileUtils.cp(path, resource_path)
- true
- end
-
- def resource_sizes(resources)
- sizes = []
- resources.each do |descriptor|
- resource_path = path_from_sha1(descriptor["sha1"])
- if File.exists?(resource_path)
- entry = descriptor.dup
- entry["size"] = File.size(resource_path)
- sizes << entry
- end
- end
- sizes
- end
-
- private
-
- def overwrite_destination_with!(descriptor, destination)
- FileUtils.mkdir_p File.dirname(destination)
- resource_path = path_from_sha1(descriptor["sha1"])
- FileUtils.cp(resource_path, destination)
- end
-
- def path_from_sha1(sha1)
- sha1 = sha1.to_s.downcase
- as_integer = Integer("0x#{sha1}")
- dirs = []
- levels.times do |i|
- dirs << as_integer.modulo(modulos[i]).to_s
- end
- dir = File.join(directory, *dirs)
- File.join(dir, sha1)
- end
- end
-end
@@ -1,14 +1,13 @@
# Copyright (c) 2009-2011 VMware, Inc.
require File.expand_path("../spec_helper", __FILE__)
-require File.expand_path("../../resource_pool/spec_helper", __FILE__)
module VCAP::CloudController
describe VCAP::CloudController::ResourceMatch do
- include_context "resource pool", FilesystemPool
+ include_context "resource pool", ResourcePool
before(:all) do
- FilesystemPool.add_directory(@tmpdir)
+ ResourcePool.add_directory(@tmpdir)
end
describe "POST /resources" do
@@ -1,14 +1,13 @@
# Copyright (c) 2009-2011 VMware, Inc.
require File.expand_path("../spec_helper", __FILE__)
-require File.expand_path("../../resource_pool/spec_helper", __FILE__)
module VCAP::CloudController
describe VCAP::CloudController::ResourceMatch do
- include_context "resource pool", FilesystemPool
+ include_context "resource pool"
before(:all) do
- FilesystemPool.add_directory(@tmpdir)
+ ResourcePool.add_directory(@tmpdir)
end
describe "PUT /v2/resource_match" do
Oops, something went wrong.

0 comments on commit c0bbedd

Please sign in to comment.