Skip to content

Commit

Permalink
Only batch fetch when there is a sufficiently large number requested (#316)
Browse files Browse the repository at this point in the history

* Don't warm cache when the number of resources to fetch is low
* Lots of tests including edge cases.
  • Loading branch information
dturn committed Jul 27, 2018
1 parent ce947cd commit 7b47ed8
Show file tree
Hide file tree
Showing 3 changed files with 30 additions and 14 deletions.
13 changes: 9 additions & 4 deletions lib/kubernetes-deploy/sync_mediator.rb
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
# frozen_string_literal: true
module KubernetesDeploy
class SyncMediator
LARGE_BATCH_THRESHOLD = Concurrency::MAX_THREADS * 3

def initialize(namespace:, context:, logger:)
@namespace = namespace
@context = context
Expand Down Expand Up @@ -29,11 +31,14 @@ def get_all(kind, selector = nil)

def sync(resources)
clear_cache
dependencies = resources.map(&:class).uniq.flat_map do |c|
c::SYNC_DEPENDENCIES if c.const_defined?('SYNC_DEPENDENCIES')

if resources.count > LARGE_BATCH_THRESHOLD
dependencies = resources.map(&:class).uniq.flat_map do |c|
c::SYNC_DEPENDENCIES if c.const_defined?('SYNC_DEPENDENCIES')
end
kinds = (resources.map(&:type) + dependencies).compact.uniq
kinds.each { |kind| fetch_by_kind(kind) }
end
kinds = (resources.map(&:type) + dependencies).compact.uniq
kinds.each { |kind| fetch_by_kind(kind) }

KubernetesDeploy::Concurrency.split_across_threads(resources) do |r|
r.sync(dup)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,9 @@ def test_status_not_found_for_all_types_before_exist
build_service(service_fixture('zero-replica'))
]

stub_kubectl_response("get", "Service", "-a", "--output=json", resp: { items: [] })
stub_kubectl_response("get", "Service", "zero-replica", "-a", "--output=json", resp: {})
stub_kubectl_response("get", "Service", "standard", "-a", "--output=json", resp: {})
stub_kubectl_response("get", "Service", "external-name", "-a", "--output=json", resp: {})
stub_kubectl_response("get", "Deployment", "-a", "--output=json", resp: { items: deployment_fixtures })
stub_kubectl_response("get", "Pod", "-a", "--output=json", resp: { items: pod_fixtures })

Expand Down
27 changes: 18 additions & 9 deletions test/unit/sync_mediator_test.rb
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,8 @@ def test_sync_caches_the_types_of_the_instances_given
all_cm = [@fake_cm.kubectl_response, @fake_cm2.kubectl_response]
stub_kubectl_response('get', 'FakeConfigMap', *@params, resp: { "items" => all_cm }, times: 1)
stub_kubectl_response('get', 'FakePod', *@params, resp: { "items" => [@fake_pod.kubectl_response] }, times: 1)
mediator.sync([@fake_cm, @fake_pod])
# Cache is only warmed if we batch fetch
mediator.sync([@fake_cm, @fake_pod] * KubernetesDeploy::SyncMediator::LARGE_BATCH_THRESHOLD)

# These should use the warm cache
assert_equal @fake_cm.kubectl_response, mediator.get_instance('FakeConfigMap', @fake_cm.name)
Expand All @@ -106,7 +107,9 @@ def test_sync_caches_the_types_of_the_dependencies_of_the_instances_given
stub_kubectl_response('get', 'FakeDeployment', *@params,
resp: { "items" => [@fake_deployment.kubectl_response] }, times: 1)
stub_kubectl_response('get', 'FakePod', *@params, resp: { "items" => [@fake_pod.kubectl_response] }, times: 1)
mediator.sync([@fake_deployment]) # pod is a dependency so should get cached too
# Dependency fetching is only done if we batch fetch
# pod is a dependency so should get cached too
mediator.sync([@fake_deployment] * (KubernetesDeploy::SyncMediator::LARGE_BATCH_THRESHOLD + 1))

assert_equal @fake_deployment.kubectl_response, mediator.get_instance('FakeDeployment', @fake_deployment.name)
assert_equal @fake_pod.kubectl_response, mediator.get_instance('FakePod', @fake_pod.name)
Expand All @@ -118,20 +121,26 @@ def test_calling_instance_sync_does_not_allow_instances_to_affect_the_global_cac
bad_citizen = BadCitizen.new('foo')
stub_kubectl_response('get', 'BadCitizen', *@params, resp: { "items" => [bad_citizen.kubectl_response] }, times: 1)

mediator.sync([bad_citizen])
# Cache is only warmed if we batch fetch
mediator.sync([bad_citizen] * (KubernetesDeploy::SyncMediator::LARGE_BATCH_THRESHOLD + 1))
assert_equal bad_citizen.kubectl_response, mediator.get_instance('BadCitizen', bad_citizen.name) # still cached
end

def test_sync_calls_sync_on_each_instance
stub_kubectl_response('get', 'FakeDeployment', *@params,
resp: { "items" => [@fake_deployment.kubectl_response] }, times: 1)
stub_kubectl_response('get', 'FakePod', *@params, resp: { "items" => [@fake_pod.kubectl_response] }, times: 1)
all_cm = [@fake_cm.kubectl_response, @fake_cm2.kubectl_response]
stub_kubectl_response('get', 'FakeConfigMap', *@params, resp: { "items" => all_cm }, times: 1)
test_resources = [@fake_pod, @fake_cm, @fake_deployment]
test_resources.each { |r| r.expects(:sync).once }
mediator.sync(test_resources)
end

def test_sync_does_not_warm_cache_with_few_resources
KubernetesDeploy::Kubectl.any_instance.expects(:run).with('get', 'FakeConfigMap', *@params).never

test_resources = [@fake_pod, @fake_cm, @fake_cm2, @fake_deployment]
test_resources = [@fake_cm] * KubernetesDeploy::SyncMediator::LARGE_BATCH_THRESHOLD
test_resources.each { |r| r.expects(:sync).once }
mediator.sync(test_resources)

stub_kubectl_response('get', 'FakeConfigMap', @fake_cm.name, *@params, resp: @fake_cm.kubectl_response, times: 1)
mediator.get_instance('FakeConfigMap', @fake_cm.name)
end

private
Expand Down

0 comments on commit 7b47ed8

Please sign in to comment.