diff --git a/nova/exception.py b/nova/exception.py index 85b5fc643c8..18a4bdcb545 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -1423,3 +1423,7 @@ class MissingParameter(NovaException): class PciConfigInvalidWhitelist(Invalid): msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s") + + +class PciTrackerInvalidNodeId(NovaException): + msg_fmt = _("Cannot change %(node_id)s to %(new_node_id)s") diff --git a/nova/pci/pci_manager.py b/nova/pci/pci_manager.py new file mode 100644 index 00000000000..ac8c4e813b2 --- /dev/null +++ b/nova/pci/pci_manager.py @@ -0,0 +1,316 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 Intel, Inc. +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections + +from nova.compute import task_states +from nova.compute import vm_states +from nova import context +from nova import exception +from nova.objects import instance +from nova.objects import pci_device +from nova.openstack.common.gettextutils import _ +from nova.openstack.common import log as logging +from nova.pci import pci_request +from nova.pci import pci_stats +from nova.pci import pci_utils + +LOG = logging.getLogger(__name__) + + +class PciDevTracker(object): + """Manage pci devices in a compute node. + + This class fetches pci passthrough information from hypervisor + and tracks the usage of these devices. 
+ + It's called by compute node resource tracker to allocate and free + devices to/from instances, and to update the available pci passthrough + devices information from hypervisor periodically. The devices + information is updated to DB when devices information is changed. + """ + + def __init__(self, node_id=None): + """Create a pci device tracker. + + If a node_id is passed in, it will fetch pci devices information + from database, otherwise, it will create an empty devices list + and the resource tracker will update the node_id information later. + """ + + super(PciDevTracker, self).__init__() + self.stale = {} + self.node_id = node_id + self.stats = pci_stats.PciDeviceStats() + if node_id: + self.pci_devs = pci_device.PciDeviceList.get_by_compute_node( + context.get_admin_context(), node_id) + else: + self.pci_devs = pci_device.PciDeviceList() + self._initial_instance_usage() + + def _initial_instance_usage(self): + self.allocations = collections.defaultdict(list) + self.claims = collections.defaultdict(list) + for dev in self.pci_devs: + uuid = dev['instance_uuid'] + if dev['status'] == 'claimed': + self.claims[uuid].append(dev) + elif dev['status'] == 'allocated': + self.allocations[uuid].append(dev) + elif dev['status'] == 'available': + self.stats.add_device(dev) + + def _filter_devices_for_spec(self, request_spec, pci_devs): + return [p for p in pci_devs + if pci_utils.pci_device_prop_match(p, request_spec)] + + def _get_free_devices_for_request(self, pci_request, pci_devs): + count = pci_request.get('count', 1) + spec = pci_request.get('spec', []) + devs = self._filter_devices_for_spec(spec, pci_devs) + if len(devs) < count: + return None + else: + return devs[:count] + + @property + def free_devs(self): + return [dev for dev in self.pci_devs if dev.status == 'available'] + + def get_free_devices_for_requests(self, pci_requests): + """Select free pci devices for requests + + Pci_requests is a list of pci_request dictionaries. 
Each dictionary + has three keys: + count: number of pci devices required, default 1 + spec: the pci properties that the devices should meet + alias_name: alias the pci_request is translated from, optional + + If any single pci_request cannot find any free devices, then the + entire request list will fail. + """ + alloc = [] + + for request in pci_requests: + available = self._get_free_devices_for_request( + request, + [p for p in self.free_devs if p not in alloc]) + if not available: + return [] + alloc.extend(available) + return alloc + + @property + def all_devs(self): + return self.pci_devs + + def save(self, context): + for dev in self.pci_devs: + if dev.obj_what_changed(): + dev.save(context) + + self.pci_devs.objects = [dev for dev in self.pci_devs + if dev['status'] != 'deleted'] + + @property + def pci_stats(self): + return self.stats + + def set_hvdevs(self, devices): + """Sync the pci device tracker with hypervisor information. + + To support pci device hot plug, we sync with the hypervisor + periodically, fetching all devices information from hypervisor, + update the tracker and sync the DB information. + + Devices should not be hot-plugged when assigned to a guest, + but possibly the hypervisor has no such guarantee. The best + we can do is to give a warning if a device is changed + or removed while assigned. + """ + + exist_addrs = set([dev['address'] for dev in self.pci_devs]) + new_addrs = set([dev['address'] for dev in devices]) + + for existed in self.pci_devs: + if existed['address'] in exist_addrs - new_addrs: + try: + existed.remove() + except exception.PciDeviceInvalidStatus as e: + LOG.warn(_("Trying to remove device with %(status)s " "ownership %(instance_uuid)s"), existed) + # Note(yjiang5): remove the device by force so that + # db entry is cleaned in next sync. + existed.status = 'removed' + else: + # Note(yjiang5): no need to update stats if an assigned + # device is hot removed. 
+ self.stats.consume_device(existed) + else: + new_value = next((dev for dev in devices if + dev['address'] == existed['address'])) + new_value['compute_node_id'] = self.node_id + if existed['status'] in ('claimed', 'allocated'): + # Pci properties may change while assigned because of + # hotplug or config changes. Although normally this should + # not happen. + + # As the devices have been assigned to an instance, we defer + # the change till the instance is destroyed. We will + # not sync the new properties with database before that. + + # TODO(yjiang5): Not sure if this is a right policy, but + # at least it avoids some confusion and, if needed, + # we can add more action like killing the instance + # by force in future. + self.stale[new_value['address']] = new_value + else: + existed.update_device(new_value) + + for dev in [dev for dev in devices if + dev['address'] in new_addrs - exist_addrs]: + dev['compute_node_id'] = self.node_id + dev_obj = pci_device.PciDevice.create(dev) + self.pci_devs.objects.append(dev_obj) + self.stats.add_device(dev_obj) + + def _claim_instance(self, instance, prefix=''): + pci_requests = pci_request.get_instance_pci_requests( + instance, prefix) + if not pci_requests: + return None + devs = self.get_free_devices_for_requests(pci_requests) + if not devs: + raise exception.PciDeviceRequestFailed(pci_requests) + for dev in devs: + dev.claim(instance) + self.stats.consume_device(dev) + return devs + + def _allocate_instance(self, instance, devs): + for dev in devs: + dev.allocate(instance) + + def _free_device(self, dev, instance=None): + dev.free(instance) + stale = self.stale.pop(dev['address'], None) + if stale: + dev.update_device(stale) + self.stats.add_device(dev) + + def _free_instance(self, instance): + # Note(yjiang5): When an instance is resized, the devices in the + # destination node are claimed to the instance in prep_resize stage. + # However, the instance contains only allocated devices + # information, not the claimed one. 
So we can't use + # instance['pci_devices'] to check the devices to be freed. + for dev in self.pci_devs: + if (dev['status'] in ('claimed', 'allocated') and + dev['instance_uuid'] == instance['uuid']): + self._free_device(dev) + + def update_pci_for_instance(self, instance): + """Update instance's pci usage information. + + The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock + """ + + uuid = instance['uuid'] + vm_state = instance['vm_state'] + task_state = instance['task_state'] + + if vm_state == vm_states.DELETED: + if self.allocations.pop(uuid, None): + self._free_instance(instance) + elif self.claims.pop(uuid, None): + self._free_instance(instance) + elif task_state == task_states.RESIZE_MIGRATED: + devs = self.allocations.pop(uuid, None) + if devs: + self._free_instance(instance) + elif task_state == task_states.RESIZE_FINISH: + devs = self.claims.pop(uuid, None) + if devs: + self._allocate_instance(instance, devs) + self.allocations[uuid] = devs + elif (uuid not in self.allocations and + uuid not in self.claims): + devs = self._claim_instance(instance) + if devs: + self._allocate_instance(instance, devs) + self.allocations[uuid] = devs + + def update_pci_for_migration(self, instance, sign=1): + """Update instance's pci usage information when it is migrated. + + The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock. + + :param sign: claim devices for instance when sign is 1, remove + the claims when sign is -1 + """ + uuid = instance['uuid'] + if sign == 1 and uuid not in self.claims: + devs = self._claim_instance(instance, 'new_') + self.claims[uuid] = devs + if sign == -1 and uuid in self.claims: + self._free_instance(instance) + + def clean_usage(self, instances, migrations, orphans): + """Remove all usages for instances not passed in the parameter. 
+ + The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock + """ + existed = [inst['uuid'] for inst in instances] + existed += [mig['instance_uuid'] for mig in migrations] + existed += [inst['uuid'] for inst in orphans] + + for uuid in self.claims.keys(): + if uuid not in existed: + for dev in self.claims.pop(uuid): + self._free_device(dev) + for uuid in self.allocations.keys(): + if uuid not in existed: + for dev in self.allocations.pop(uuid): + self._free_device(dev) + + def set_compute_node_id(self, node_id): + """Set the compute node id that this object is tracking for. + + In current resource tracker implementation, the + compute_node entry is created in the last step of + update_available_resources, thus we have to lazily set the + compute_node_id at that time. + """ + + if self.node_id and self.node_id != node_id: + raise exception.PciTrackerInvalidNodeId(node_id=self.node_id, + new_node_id=node_id) + self.node_id = node_id + for dev in self.pci_devs: + dev.compute_node_id = node_id + + +def get_instance_pci_devs(inst): + """Get the devices assigned to the instances.""" + if isinstance(inst, instance.Instance): + return inst.pci_devices + else: + ctxt = context.get_admin_context() + return pci_device.PciDeviceList.get_by_instance_uuid( + ctxt, inst['uuid']) diff --git a/nova/tests/pci/test_pci_manager.py b/nova/tests/pci/test_pci_manager.py new file mode 100644 index 00000000000..75ab98e0ddf --- /dev/null +++ b/nova/tests/pci/test_pci_manager.py @@ -0,0 +1,337 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from nova.compute import task_states +from nova.compute import vm_states +from nova import context +from nova import db +from nova import exception +from nova.objects import instance +from nova.objects import pci_device +from nova.pci import pci_manager +from nova.pci import pci_request +from nova import test +from nova.tests.api.openstack import fakes + + +fake_pci = { + 'compute_node_id': 1, + 'address': '0000:00:00.1', + 'product_id': 'p', + 'vendor_id': 'v', + 'status': 'available'} +fake_pci_1 = dict(fake_pci, address='0000:00:00.2', + product_id='p1', vendor_id='v1') +fake_pci_2 = dict(fake_pci, address='0000:00:00.3') + + +fake_db_dev = { + 'created_at': None, + 'updated_at': None, + 'deleted_at': None, + 'deleted': None, + 'id': 1, + 'compute_node_id': 1, + 'address': '0000:00:00.1', + 'product_id': 'p', + 'vendor_id': 'v', + 'status': 'available', + 'extra_info': '{}', + } +fake_db_dev_1 = dict(fake_db_dev, vendor_id='v1', + product_id='p1', id=2, + address='0000:00:00.2') +fake_db_dev_2 = dict(fake_db_dev, id=3, address='0000:00:00.3') +fake_db_devs = [fake_db_dev, fake_db_dev_1, fake_db_dev_2] + + +fake_pci_requests = [ + {'count': 1, + 'spec': [{'vendor_id': 'v'}]}, + {'count': 1, + 'spec': [{'vendor_id': 'v1'}]}] + + +class PciDevTrackerTestCase(test.TestCase): + def _create_fake_instance(self): + self.inst = instance.Instance() + self.inst.uuid = 'fake-inst-uuid' + self.inst.pci_devices = pci_device.PciDeviceList() + self.inst.vm_state = vm_states.ACTIVE + self.inst.task_state = None + + def _fake_get_pci_devices(self, ctxt, 
node_id): + return fake_db_devs[:] + + def _fake_pci_device_update(self, ctxt, node_id, address, value): + self.update_called += 1 + self.called_values = value + fake_return = copy.deepcopy(fake_db_dev) + return fake_return + + def _fake_pci_device_destroy(self, ctxt, node_id, address): + self.destroy_called += 1 + + def _fake_get_instance_pci_requests(self, instance, prefix=''): + return self.pci_requests + + def setUp(self): + super(PciDevTrackerTestCase, self).setUp() + self.stubs.Set(db, 'pci_device_get_all_by_node', + self._fake_get_pci_devices) + self.stubs.Set(pci_request, 'get_instance_pci_requests', + self._fake_get_instance_pci_requests) + self._create_fake_instance() + self.tracker = pci_manager.PciDevTracker(1) + + def test_pcidev_tracker_create(self): + self.assertEqual(len(self.tracker.pci_devs), 3) + self.assertEqual(len(self.tracker.free_devs), 3) + self.assertEqual(self.tracker.stale.keys(), []) + self.assertEqual(len(self.tracker.stats.pools), 2) + self.assertEqual(self.tracker.node_id, 1) + + def test_pcidev_tracker_create_no_nodeid(self): + self.tracker = pci_manager.PciDevTracker() + self.assertEqual(len(self.tracker.pci_devs), 0) + + def test_get_free_devices_for_requests(self): + devs = self.tracker.get_free_devices_for_requests(fake_pci_requests) + self.assertEqual(len(devs), 2) + self.assertEqual(set([dev['vendor_id'] for dev in devs]), + set(['v1', 'v'])) + + def test_get_free_devices_for_requests_empty(self): + devs = self.tracker.get_free_devices_for_requests([]) + self.assertEqual(len(devs), 0) + + def test_get_free_devices_for_requests_meet_partial(self): + requests = copy.deepcopy(fake_pci_requests) + requests[1]['count'] = 2 + requests[1]['spec'][0]['vendor_id'] = 'v' + devs = self.tracker.get_free_devices_for_requests(requests) + self.assertEqual(len(devs), 0) + + def test_set_hvdev_new_dev(self): + fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2') + fake_pci_devs = [copy.deepcopy(fake_pci), 
copy.deepcopy(fake_pci_1), + copy.deepcopy(fake_pci_2), copy.deepcopy(fake_pci_3)] + self.tracker.set_hvdevs(fake_pci_devs) + self.assertEqual(len(self.tracker.pci_devs), 4) + self.assertEqual(set([dev['address'] for + dev in self.tracker.pci_devs]), + set(['0000:00:00.1', '0000:00:00.2', + '0000:00:00.3', '0000:00:00.4'])) + self.assertEqual(set([dev['vendor_id'] for + dev in self.tracker.pci_devs]), + set(['v', 'v1', 'v2'])) + + def test_set_hvdev_changed(self): + fake_pci_v2 = dict(fake_pci, address='0000:00:00.2', vendor_id='v1') + fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2), + copy.deepcopy(fake_pci_v2)] + self.tracker.set_hvdevs(fake_pci_devs) + self.assertEqual(set([dev['vendor_id'] for + dev in self.tracker.pci_devs]), + set(['v', 'v1'])) + + def test_set_hvdev_remove(self): + self.tracker.set_hvdevs([fake_pci]) + self.assertEqual(len([dev for dev in self.tracker.pci_devs + if dev['status'] == 'removed']), + 2) + + def test_set_hvdev_changed_stal(self): + self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}] + self.tracker._claim_instance(self.inst) + fake_pci_3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v2') + fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2), + copy.deepcopy(fake_pci_3)] + self.tracker.set_hvdevs(fake_pci_devs) + self.assertEqual(len(self.tracker.stale), 1) + self.assertEqual(self.tracker.stale['0000:00:00.2']['vendor_id'], 'v2') + + def test_update_pci_for_instance_active(self): + self.pci_requests = fake_pci_requests + self.tracker.update_pci_for_instance(self.inst) + self.assertEqual(len(self.tracker.free_devs), 1) + self.assertEqual(self.tracker.free_devs[0]['vendor_id'], 'v') + + def test_update_pci_for_instance_fail(self): + self.pci_requests = copy.deepcopy(fake_pci_requests) + self.pci_requests[0]['count'] = 4 + self.assertRaises(exception.PciDeviceRequestFailed, + self.tracker.update_pci_for_instance, + self.inst) + + def test_update_pci_for_instance_deleted(self): + 
self.pci_requests = fake_pci_requests + self.tracker.update_pci_for_instance(self.inst) + self.assertEqual(len(self.tracker.free_devs), 1) + self.inst.vm_state = vm_states.DELETED + self.tracker.update_pci_for_instance(self.inst) + self.assertEqual(len(self.tracker.free_devs), 3) + self.assertEqual(set([dev['vendor_id'] for + dev in self.tracker.pci_devs]), + set(['v', 'v1'])) + + def test_update_pci_for_instance_resize_source(self): + self.pci_requests = fake_pci_requests + self.tracker.update_pci_for_instance(self.inst) + self.assertEqual(len(self.tracker.free_devs), 1) + self.inst.task_state = task_states.RESIZE_MIGRATED + self.tracker.update_pci_for_instance(self.inst) + self.assertEqual(len(self.tracker.free_devs), 3) + + def test_update_pci_for_instance_resize_dest(self): + self.pci_requests = fake_pci_requests + self.tracker.update_pci_for_migration(self.inst) + self.assertEqual(len(self.tracker.free_devs), 1) + self.assertEqual(len(self.tracker.claims['fake-inst-uuid']), 2) + self.assertFalse('fake-inst-uuid' in self.tracker.allocations) + self.inst.task_state = task_states.RESIZE_FINISH + self.tracker.update_pci_for_instance(self.inst) + self.assertEqual(len(self.tracker.allocations['fake-inst-uuid']), 2) + self.assertFalse('fake-inst-uuid' in self.tracker.claims) + + def test_update_pci_for_migration_in(self): + self.pci_requests = fake_pci_requests + self.tracker.update_pci_for_migration(self.inst) + self.assertEqual(len(self.tracker.free_devs), 1) + self.assertEqual(self.tracker.free_devs[0]['vendor_id'], 'v') + + def test_update_pci_for_migration_out(self): + self.pci_requests = fake_pci_requests + self.tracker.update_pci_for_migration(self.inst) + self.tracker.update_pci_for_migration(self.inst, sign=-1) + self.assertEqual(len(self.tracker.free_devs), 3) + self.assertEqual(set([dev['vendor_id'] for + dev in self.tracker.pci_devs]), + set(['v', 'v1'])) + + def test_save(self): + self.stubs.Set(db, "pci_device_update", self._fake_pci_device_update) + 
ctxt = context.get_admin_context() + fake_pci_v3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v3') + fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2), + copy.deepcopy(fake_pci_v3)] + self.tracker.set_hvdevs(fake_pci_devs) + self.update_called = 0 + self.tracker.save(ctxt) + self.assertEqual(self.update_called, 3) + + def test_save_removed(self): + self.stubs.Set(db, "pci_device_update", self._fake_pci_device_update) + self.stubs.Set(db, "pci_device_destroy", self._fake_pci_device_destroy) + self.destroy_called = 0 + ctxt = context.get_admin_context() + self.assertEqual(len(self.tracker.pci_devs), 3) + dev = self.tracker.pci_devs.objects[0] + self.update_called = 0 + dev.remove() + self.tracker.save(ctxt) + self.assertEqual(len(self.tracker.pci_devs), 2) + self.assertEqual(self.destroy_called, 1) + + def test_set_compute_node_id(self): + self.tracker = pci_manager.PciDevTracker() + fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1), + copy.deepcopy(fake_pci_2)] + self.tracker.set_hvdevs(fake_pci_devs) + self.tracker.set_compute_node_id(1) + self.assertEqual(self.tracker.node_id, 1) + self.assertEqual(self.tracker.pci_devs[0].compute_node_id, 1) + fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2') + fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1), + copy.deepcopy(fake_pci_3), copy.deepcopy(fake_pci_3)] + self.tracker.set_hvdevs(fake_pci_devs) + for dev in self.tracker.pci_devs: + self.assertEqual(dev.compute_node_id, 1) + + def test_clean_usage(self): + inst_2 = copy.copy(self.inst) + inst_2.uuid = 'uuid5' + inst = {'uuid': 'uuid1', 'vm_state': vm_states.BUILDING} + migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING} + orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING} + + self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v'}]}] + self.tracker.update_pci_for_instance(self.inst) + self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}] + 
self.tracker.update_pci_for_instance(inst_2) + self.assertEqual(len(self.tracker.free_devs), 1) + self.assertEqual(self.tracker.free_devs[0]['vendor_id'], 'v') + + self.tracker.clean_usage([self.inst], [migr], [orph]) + self.assertEqual(len(self.tracker.free_devs), 2) + self.assertEqual( + set([dev['vendor_id'] for dev in self.tracker.free_devs]), + set(['v', 'v1'])) + + def test_clean_usage_claims(self): + inst_2 = copy.copy(self.inst) + inst_2.uuid = 'uuid5' + inst = {'uuid': 'uuid1', 'vm_state': vm_states.BUILDING} + migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING} + orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING} + + self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v'}]}] + self.tracker.update_pci_for_instance(self.inst) + self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}] + self.tracker.update_pci_for_migration(inst_2) + self.assertEqual(len(self.tracker.free_devs), 1) + self.tracker.clean_usage([self.inst], [migr], [orph]) + self.assertEqual(len(self.tracker.free_devs), 2) + self.assertEqual( + set([dev['vendor_id'] for dev in self.tracker.free_devs]), + set(['v', 'v1'])) + + +class PciGetInstanceDevs(test.TestCase): + def test_get_devs_non_object(self): + def _fake_pci_device_get_by_instance_uuid(context, uuid): + self._get_by_uuid = True + return [] + + instance = fakes.stub_instance(id=1) + self.stubs.Set(db, 'pci_device_get_all_by_instance_uuid', + _fake_pci_device_get_by_instance_uuid) + self._get_by_uuid = False + devices = pci_manager.get_instance_pci_devs(instance) + self.assertEqual(self._get_by_uuid, True) + + def test_get_devs_object(self): + def _fake_obj_load_attr(foo, attrname): + if attrname == 'pci_devices': + self.load_attr_called = True + foo.pci_devices = None + + inst = fakes.stub_instance(id='1') + ctxt = context.get_admin_context() + self.mox.StubOutWithMock(db, 'instance_get') + db.instance_get(ctxt, '1', columns_to_join=[] + ).AndReturn(inst) + self.mox.ReplayAll() + inst = 
instance.Instance.get_by_id(ctxt, '1', expected_attrs=[]) + self.stubs.Set(instance.Instance, 'obj_load_attr', + _fake_obj_load_attr) + + self.load_attr_called = False + devices = pci_manager.get_instance_pci_devs(inst) + self.assertEqual(self.load_attr_called, True)