From 3a28a184a0bc0192f1f967e7a92d5e101c6edf76 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 13 Feb 2015 09:40:55 -0800 Subject: [PATCH] Replace RPC topic-based service queries with binary-based in scheduler This makes the nova/scheduler subtree query for compute services by binary name instead of RPC topic. Change-Id: I2633aff583f3036bd4d0b71edd547f46f29039e1 --- nova/scheduler/host_manager.py | 10 ++- .../unit/scheduler/test_filter_scheduler.py | 20 +++--- .../tests/unit/scheduler/test_host_manager.py | 63 +++++++++---------- .../scheduler/test_ironic_host_manager.py | 30 ++++----- 4 files changed, 57 insertions(+), 66 deletions(-) diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py index 44adcadbde1..4ac5b8bdffc 100644 --- a/nova/scheduler/host_manager.py +++ b/nova/scheduler/host_manager.py @@ -64,7 +64,6 @@ CONF = cfg.CONF CONF.register_opts(host_manager_opts) -CONF.import_opt('compute_topic', 'nova.compute.rpcapi') LOG = logging.getLogger(__name__) @@ -463,8 +462,8 @@ def get_all_host_states(self, context): """ service_refs = {service.host: service - for service in objects.ServiceList.get_by_topic( - context, CONF.compute_topic)} + for service in objects.ServiceList.get_by_binary( + context, 'nova-compute')} # Get resource usage across the available compute nodes: compute_nodes = objects.ComputeNodeList.get_all(context) seen_nodes = set() @@ -473,9 +472,8 @@ def get_all_host_states(self, context): if not service: LOG.warning(_LW( - "No service record found for host %(host)s " - "on %(topic)s topic"), - {'host': compute.host, 'topic': CONF.compute_topic}) + "No compute service record found for host %(host)s"), + {'host': compute.host}) continue host = compute.host node = compute.hypervisor_hostname diff --git a/nova/tests/unit/scheduler/test_filter_scheduler.py b/nova/tests/unit/scheduler/test_filter_scheduler.py index 2faa61bf539..8e3b55e50f0 100644 --- a/nova/tests/unit/scheduler/test_filter_scheduler.py +++ b/nova/tests/unit/scheduler/test_filter_scheduler.py @@ -36,7 +36,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): driver_cls = filter_scheduler.FilterScheduler - @mock.patch('nova.objects.ServiceList.get_by_topic', + @mock.patch('nova.objects.ServiceList.get_by_binary', return_value=fakes.SERVICES) @mock.patch('nova.objects.ComputeNodeList.get_all', return_value=fakes.COMPUTE_NODES) @@ -44,7 +44,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): return_value={'numa_topology': None, 'pci_requests': None}) def test_schedule_happy_day(self, mock_get_extra, mock_get_all, - mock_get_by_topic): + mock_get_by_binary): """Make sure there's nothing glaringly wrong with _schedule() by doing a happy day pass through. 
""" @@ -114,7 +114,7 @@ def test_post_select_populate(self): self.assertEqual({'vcpus': 5}, host_state.limits) - @mock.patch('nova.objects.ServiceList.get_by_topic', + @mock.patch('nova.objects.ServiceList.get_by_binary', return_value=fakes.SERVICES) @mock.patch('nova.objects.ComputeNodeList.get_all', return_value=fakes.COMPUTE_NODES) @@ -122,7 +122,7 @@ def test_post_select_populate(self): return_value={'numa_topology': None, 'pci_requests': None}) def test_schedule_host_pool(self, mock_get_extra, mock_get_all, - mock_get_by_topic): + mock_get_by_binary): """Make sure the scheduler_host_subset_size property works properly.""" self.flags(scheduler_host_subset_size=2) @@ -147,7 +147,7 @@ def test_schedule_host_pool(self, mock_get_extra, mock_get_all, # one host should be chosen self.assertEqual(len(hosts), 1) - @mock.patch('nova.objects.ServiceList.get_by_topic', + @mock.patch('nova.objects.ServiceList.get_by_binary', return_value=fakes.SERVICES) @mock.patch('nova.objects.ComputeNodeList.get_all', return_value=fakes.COMPUTE_NODES) @@ -155,7 +155,7 @@ def test_schedule_host_pool(self, mock_get_extra, mock_get_all, return_value={'numa_topology': None, 'pci_requests': None}) def test_schedule_large_host_pool(self, mock_get_extra, mock_get_all, - mock_get_by_topic): + mock_get_by_binary): """Hosts should still be chosen if pool size is larger than number of filtered hosts. """ @@ -181,7 +181,7 @@ def test_schedule_large_host_pool(self, mock_get_extra, mock_get_all, # one host should be chose self.assertEqual(len(hosts), 1) - @mock.patch('nova.objects.ServiceList.get_by_topic', + @mock.patch('nova.objects.ServiceList.get_by_binary', return_value=fakes.SERVICES) @mock.patch('nova.objects.ComputeNodeList.get_all', return_value=fakes.COMPUTE_NODES) @@ -189,7 +189,7 @@ def test_schedule_large_host_pool(self, mock_get_extra, mock_get_all, return_value={'numa_topology': None, 'pci_requests': None}) def test_schedule_chooses_best_host(self, mock_get_extra, mock_get_all, - mock_get_by_topic): + mock_get_by_binary): """If scheduler_host_subset_size is 1, the largest host with greatest weight should be returned. """ @@ -230,7 +230,7 @@ def _fake_weigh_objects(_self, functions, hosts, options): self.assertEqual(50, hosts[0].weight) - @mock.patch('nova.objects.ServiceList.get_by_topic', + @mock.patch('nova.objects.ServiceList.get_by_binary', return_value=fakes.SERVICES) @mock.patch('nova.objects.ComputeNodeList.get_all', return_value=fakes.COMPUTE_NODES) @@ -238,7 +238,7 @@ def _fake_weigh_objects(_self, functions, hosts, options): return_value={'numa_topology': None, 'pci_requests': None}) def test_select_destinations(self, mock_get_extra, mock_get_all, - mock_get_by_topic): + mock_get_by_binary): """select_destinations is basically a wrapper around _schedule(). 
Similar to the _schedule tests, this just does a happy path test to diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py index 4d3c7f334e9..f5a627da27b 100644 --- a/nova/tests/unit/scheduler/test_host_manager.py +++ b/nova/tests/unit/scheduler/test_host_manager.py @@ -19,7 +19,6 @@ import collections import mock -from oslo_config import cfg from oslo_serialization import jsonutils import six @@ -35,9 +34,6 @@ from nova.tests.unit.scheduler import fakes from nova import utils -CONF = cfg.CONF -CONF.import_opt('compute_topic', 'nova.compute.rpcapi') - class FakeFilterClass1(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): @@ -313,12 +309,12 @@ def test_get_all_host_states(self): context = 'fake_context' - self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic') + self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary') self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all') self.mox.StubOutWithMock(host_manager.LOG, 'warning') - objects.ServiceList.get_by_topic( - context, CONF.compute_topic).AndReturn(fakes.SERVICES) + objects.ServiceList.get_by_binary( + context, 'nova-compute').AndReturn(fakes.SERVICES) objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES) # node 3 host physical disk space is greater than database host_manager.LOG.warning("Host %(hostname)s has more disk space " @@ -327,9 +323,9 @@ def test_get_all_host_states(self): {'physical': 3333, 'database': 3072, 'hostname': 'node3'}) # Invalid service - host_manager.LOG.warning("No service record found for host %(host)s " - "on %(topic)s topic", - {'host': 'fake', 'topic': CONF.compute_topic}) + host_manager.LOG.warning("No compute service record found for " + "host %(host)s", + {'host': 'fake'}) self.mox.ReplayAll() self.host_manager.get_all_host_states(context) host_states_map = self.host_manager.host_state_map @@ -371,10 +367,10 @@ def test_get_all_host_states(self): @mock.patch.object(host_manager.HostState, 'update_from_compute_node') @mock.patch.object(objects.ComputeNodeList, 'get_all') - @mock.patch.object(objects.ServiceList, 'get_by_topic') - def test_get_all_host_states_with_no_aggs(self, svc_get_by_topic, + @mock.patch.object(objects.ServiceList, 'get_by_binary') + def test_get_all_host_states_with_no_aggs(self, svc_get_by_binary, cn_get_all, update_from_cn): - svc_get_by_topic.return_value = [objects.Service(host='fake')] + svc_get_by_binary.return_value = [objects.Service(host='fake')] cn_get_all.return_value = [ objects.ComputeNode(host='fake', hypervisor_hostname='fake')] @@ -386,11 +382,11 @@ def test_get_all_host_states_with_no_aggs(self, svc_get_by_topic, @mock.patch.object(host_manager.HostState, 'update_from_compute_node') @mock.patch.object(objects.ComputeNodeList, 'get_all') - @mock.patch.object(objects.ServiceList, 'get_by_topic') - def test_get_all_host_states_with_matching_aggs(self, svc_get_by_topic, + @mock.patch.object(objects.ServiceList, 'get_by_binary') + def test_get_all_host_states_with_matching_aggs(self, svc_get_by_binary, cn_get_all, update_from_cn): - svc_get_by_topic.return_value = [objects.Service(host='fake')] + svc_get_by_binary.return_value = [objects.Service(host='fake')] cn_get_all.return_value = [ objects.ComputeNode(host='fake', hypervisor_hostname='fake')] fake_agg = objects.Aggregate(id=1) @@ -404,12 +400,13 @@ def test_get_all_host_states_with_matching_aggs(self, svc_get_by_topic, @mock.patch.object(host_manager.HostState, 'update_from_compute_node') 
@mock.patch.object(objects.ComputeNodeList, 'get_all') - @mock.patch.object(objects.ServiceList, 'get_by_topic') - def test_get_all_host_states_with_not_matching_aggs(self, svc_get_by_topic, + @mock.patch.object(objects.ServiceList, 'get_by_binary') + def test_get_all_host_states_with_not_matching_aggs(self, + svc_get_by_binary, cn_get_all, update_from_cn): - svc_get_by_topic.return_value = [objects.Service(host='fake'), - objects.Service(host='other')] + svc_get_by_binary.return_value = [objects.Service(host='fake'), + objects.Service(host='other')] cn_get_all.return_value = [ objects.ComputeNode(host='fake', hypervisor_hostname='fake'), objects.ComputeNode(host='other', hypervisor_hostname='other')] @@ -440,10 +437,10 @@ def setUp(self): def test_get_all_host_states(self): context = 'fake_context' - self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic') + self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary') self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all') - objects.ServiceList.get_by_topic( - context, CONF.compute_topic).AndReturn(fakes.SERVICES) + objects.ServiceList.get_by_binary( + context, 'nova-compute').AndReturn(fakes.SERVICES) objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES) self.mox.ReplayAll() @@ -454,17 +451,17 @@ def test_get_all_host_states(self): def test_get_all_host_states_after_delete_one(self): context = 'fake_context' - self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic') + self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary') self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all') # all nodes active for first call - objects.ServiceList.get_by_topic( - context, CONF.compute_topic).AndReturn(fakes.SERVICES) + objects.ServiceList.get_by_binary( + context, 'nova-compute').AndReturn(fakes.SERVICES) objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES) # remove node4 for second call running_nodes = [n for n in fakes.COMPUTE_NODES if n.get('hypervisor_hostname') != 'node4'] - objects.ServiceList.get_by_topic( - context, CONF.compute_topic).AndReturn(fakes.SERVICES) + objects.ServiceList.get_by_binary( + context, 'nova-compute').AndReturn(fakes.SERVICES) objects.ComputeNodeList.get_all(context).AndReturn(running_nodes) self.mox.ReplayAll() @@ -476,15 +473,15 @@ def test_get_all_host_states_after_delete_one(self): def test_get_all_host_states_after_delete_all(self): context = 'fake_context' - self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic') + self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary') self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all') # all nodes active for first call - objects.ServiceList.get_by_topic( - context, CONF.compute_topic).AndReturn(fakes.SERVICES) + objects.ServiceList.get_by_binary( + context, 'nova-compute').AndReturn(fakes.SERVICES) objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES) # remove all nodes for second call - objects.ServiceList.get_by_topic( - context, CONF.compute_topic).AndReturn(fakes.SERVICES) + objects.ServiceList.get_by_binary( + context, 'nova-compute').AndReturn(fakes.SERVICES) objects.ComputeNodeList.get_all(context).AndReturn([]) self.mox.ReplayAll() diff --git a/nova/tests/unit/scheduler/test_ironic_host_manager.py b/nova/tests/unit/scheduler/test_ironic_host_manager.py index d2925a9c0df..cbca1df1527 100644 --- a/nova/tests/unit/scheduler/test_ironic_host_manager.py +++ b/nova/tests/unit/scheduler/test_ironic_host_manager.py @@ -18,7 +18,6 @@ """ import mock -from oslo_config import cfg 
from nova import exception from nova import objects @@ -29,9 +28,6 @@ from nova import test from nova.tests.unit.scheduler import ironic_fakes -CONF = cfg.CONF -CONF.import_opt('compute_topic', 'nova.compute.rpcapi') - class FakeFilterClass1(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): @@ -68,10 +64,10 @@ def test_get_all_host_states(self): # Ensure .service is set and we have the values we expect to. context = 'fake_context' - self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic') + self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary') self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all') - objects.ServiceList.get_by_topic( - context, CONF.compute_topic).AndReturn(ironic_fakes.SERVICES) + objects.ServiceList.get_by_binary( + context, 'nova-compute').AndReturn(ironic_fakes.SERVICES) objects.ComputeNodeList.get_all(context).AndReturn( ironic_fakes.COMPUTE_NODES) self.mox.ReplayAll() @@ -138,18 +134,18 @@ def test_create_non_ironic_host_state(self, init_mock): def test_get_all_host_states_after_delete_one(self): context = 'fake_context' - self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic') + self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary') self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all') # all nodes active for first call - objects.ServiceList.get_by_topic( - context, CONF.compute_topic).AndReturn(ironic_fakes.SERVICES) + objects.ServiceList.get_by_binary( + context, 'nova-compute').AndReturn(ironic_fakes.SERVICES) objects.ComputeNodeList.get_all(context).AndReturn( ironic_fakes.COMPUTE_NODES) # remove node4 for second call running_nodes = [n for n in ironic_fakes.COMPUTE_NODES if n.get('hypervisor_hostname') != 'node4uuid'] - objects.ServiceList.get_by_topic( - context, CONF.compute_topic).AndReturn(ironic_fakes.SERVICES) + objects.ServiceList.get_by_binary( + context, 'nova-compute').AndReturn(ironic_fakes.SERVICES) objects.ComputeNodeList.get_all(context).AndReturn(running_nodes) self.mox.ReplayAll() @@ -161,16 +157,16 @@ def test_get_all_host_states_after_delete_one(self): def test_get_all_host_states_after_delete_all(self): context = 'fake_context' - self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic') + self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary') self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all') # all nodes active for first call - objects.ServiceList.get_by_topic( - context, CONF.compute_topic).AndReturn(ironic_fakes.SERVICES) + objects.ServiceList.get_by_binary( + context, 'nova-compute').AndReturn(ironic_fakes.SERVICES) objects.ComputeNodeList.get_all(context).AndReturn( ironic_fakes.COMPUTE_NODES) # remove all nodes for second call - objects.ServiceList.get_by_topic( - context, CONF.compute_topic).AndReturn(ironic_fakes.SERVICES) + objects.ServiceList.get_by_binary( + context, 'nova-compute').AndReturn(ironic_fakes.SERVICES) objects.ComputeNodeList.get_all(context).AndReturn([]) self.mox.ReplayAll()
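The substance of this change is the service lookup in HostManager.get_all_host_states(): instead of asking for services registered on the configurable compute_topic RPC topic, the scheduler now asks for services whose binary is 'nova-compute', which is what lets it drop the CONF.import_opt('compute_topic', ...) dependency. A minimal sketch of the new lookup is below, assuming nova's objects layer is importable and a request context is already in hand; the helper name is illustrative and not part of the patch.

from nova import objects


def compute_service_refs(context):
    # Sketch of the dict comprehension introduced in
    # HostManager.get_all_host_states(): map compute host name to its
    # Service record so compute nodes can be matched to their service.
    # Query by the fixed binary name rather than the RPC topic option.
    return {service.host: service
            for service in objects.ServiceList.get_by_binary(
                context, 'nova-compute')}

The test updates follow the same one-for-one pattern: each mock.patch of nova.objects.ServiceList.get_by_topic and each mox stub of get_by_topic becomes get_by_binary, with the CONF.compute_topic argument replaced by the literal 'nova-compute'.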