Skip to content

Commit

Permalink
pyflakes cleanups on libvirt/connection.py
Browse files Browse the repository at this point in the history
This happens to fix another issue I ran into where boto was
required when trying to start up the compute manager. This is because
virt/connection.py would import virt/libvirt/connection.py, which would
import nova.auth.manager, which would import nova.auth.signer, which
would try to import boto.

Fortunately, we no longer need to import nova.auth.manager in
libvirt/connection.py, so that import can simply be removed.

Change-Id: I85f8e874ef3dc6f53667918ae081512115608d83
  • Loading branch information
comstud committed Mar 8, 2012
1 parent 70f0ea5 commit e347d1a
Showing 1 changed file with 11 additions and 22 deletions.
33 changes: 11 additions & 22 deletions nova/virt/libvirt/connection.py
Original file line number Original file line Diff line number Diff line change
Expand Up @@ -52,7 +52,6 @@
from xml.dom import minidom from xml.dom import minidom
from xml.etree import ElementTree from xml.etree import ElementTree


from nova.auth import manager
from nova import block_device from nova import block_device
from nova.compute import instance_types from nova.compute import instance_types
from nova.compute import power_state from nova.compute import power_state
Expand All @@ -63,10 +62,8 @@
import nova.image import nova.image
from nova import log as logging from nova import log as logging
from nova.openstack.common import cfg from nova.openstack.common import cfg
from nova import network
from nova import utils from nova import utils
from nova.virt import driver from nova.virt import driver
from nova.virt import images
from nova.virt.disk import api as disk from nova.virt.disk import api as disk
from nova.virt.libvirt import firewall from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagecache from nova.virt.libvirt import imagecache
Expand Down Expand Up @@ -298,7 +295,7 @@ def get_num_instances(self):
def instance_exists(self, instance_id): def instance_exists(self, instance_id):
"""Efficient override of base instance_exists method.""" """Efficient override of base instance_exists method."""
try: try:
_ignored = self._conn.lookupByName(instance_id) self._conn.lookupByName(instance_id)
return True return True
except libvirt.libvirtError: except libvirt.libvirtError:
return False return False
Expand Down Expand Up @@ -399,7 +396,7 @@ def _destroy(self, instance, network_info, block_device_info=None,
def _wait_for_destroy(): def _wait_for_destroy():
"""Called at an interval until the VM is gone.""" """Called at an interval until the VM is gone."""
try: try:
state = self.get_info(instance)['state'] self.get_info(instance)
except exception.NotFound: except exception.NotFound:
LOG.info(_("Instance destroyed successfully."), LOG.info(_("Instance destroyed successfully."),
instance=instance) instance=instance)
Expand All @@ -417,9 +414,9 @@ def _wait_for_destroy():
for vol in block_device_mapping: for vol in block_device_mapping:
connection_info = vol['connection_info'] connection_info = vol['connection_info']
mountpoint = vol['mount_device'] mountpoint = vol['mount_device']
xml = self.volume_driver_method('disconnect_volume', self.volume_driver_method('disconnect_volume',
connection_info, connection_info,
mountpoint) mountpoint)
if cleanup: if cleanup:
self._cleanup(instance) self._cleanup(instance)


Expand All @@ -431,7 +428,6 @@ def destroy(self, instance, network_info, block_device_info=None):


def _cleanup(self, instance): def _cleanup(self, instance):
target = os.path.join(FLAGS.instances_path, instance['name']) target = os.path.join(FLAGS.instances_path, instance['name'])
instance_name = instance['name']
LOG.info(_('Deleting instance files %(target)s') % locals(), LOG.info(_('Deleting instance files %(target)s') % locals(),
instance=instance) instance=instance)
if FLAGS.libvirt_type == 'lxc': if FLAGS.libvirt_type == 'lxc':
Expand Down Expand Up @@ -541,7 +537,6 @@ def _detach_lxc_volume(self, xml, virt_dom, instance_name):
LOG.info(_('detaching LXC block device')) LOG.info(_('detaching LXC block device'))


lxc_container_root = self.get_lxc_container_root(virt_dom) lxc_container_root = self.get_lxc_container_root(virt_dom)
lxc_host_volume = self.get_lxc_host_device(xml)
lxc_container_device = self.get_lxc_container_target(xml) lxc_container_device = self.get_lxc_container_target(xml)
lxc_container_target = "%s/%s" % (lxc_container_root, lxc_container_target = "%s/%s" % (lxc_container_root,
lxc_container_device) lxc_container_device)
Expand Down Expand Up @@ -858,7 +853,7 @@ def spawn(self, context, instance, image_meta, network_info,
self._create_image(context, instance, xml, network_info=network_info, self._create_image(context, instance, xml, network_info=network_info,
block_device_info=block_device_info) block_device_info=block_device_info)


domain = self._create_new_domain(xml) self._create_new_domain(xml)
LOG.debug(_("Instance is running"), instance=instance) LOG.debug(_("Instance is running"), instance=instance)
self._enable_hairpin(instance) self._enable_hairpin(instance)
self.firewall_driver.apply_instance_filter(instance, network_info) self.firewall_driver.apply_instance_filter(instance, network_info)
Expand Down Expand Up @@ -1185,7 +1180,6 @@ def basepath(fname='', suffix=suffix):
ifc_template = open(FLAGS.injected_network_template).read() ifc_template = open(FLAGS.injected_network_template).read()
ifc_num = -1 ifc_num = -1
have_injected_networks = False have_injected_networks = False
admin_context = nova_context.get_admin_context()
for (network_ref, mapping) in network_info: for (network_ref, mapping) in network_info:
ifc_num += 1 ifc_num += 1


Expand Down Expand Up @@ -1226,9 +1220,6 @@ def basepath(fname='', suffix=suffix):
admin_password = None admin_password = None


if any((key, net, metadata, admin_password)): if any((key, net, metadata, admin_password)):

instance_name = instance['name']

if config_drive: # Should be True or None by now. if config_drive: # Should be True or None by now.
injection_path = basepath('disk.config') injection_path = basepath('disk.config')
img_id = 'config-drive' img_id = 'config-drive'
Expand Down Expand Up @@ -1385,7 +1376,6 @@ def _prepare_xml_info(self, instance, network_info, image_meta, rescue,
nova_context.get_admin_context(), instance['id'], nova_context.get_admin_context(), instance['id'],
{'default_swap_device': '/dev/' + swap_device}) {'default_swap_device': '/dev/' + swap_device})


config_drive = False
if instance.get('config_drive') or instance.get('config_drive_id'): if instance.get('config_drive') or instance.get('config_drive_id'):
xml_info['config_drive'] = xml_info['basepath'] + "/disk.config" xml_info['config_drive'] = xml_info['basepath'] + "/disk.config"


Expand Down Expand Up @@ -1968,9 +1958,9 @@ def pre_live_migration(self, block_device_info):
for vol in block_device_mapping: for vol in block_device_mapping:
connection_info = vol['connection_info'] connection_info = vol['connection_info']
mountpoint = vol['mount_device'] mountpoint = vol['mount_device']
xml = self.volume_driver_method('connect_volume', self.volume_driver_method('connect_volume',
connection_info, connection_info,
mountpoint) mountpoint)


def pre_block_migration(self, ctxt, instance_ref, disk_info_json): def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
"""Preparation block migration. """Preparation block migration.
Expand Down Expand Up @@ -2287,8 +2277,7 @@ def finish_migration(self, context, migration, instance, disk_info,
network_info=network_info, network_info=network_info,
block_device_info=None) block_device_info=None)


domain = self._create_new_domain(xml) self._create_new_domain(xml)

self.firewall_driver.apply_instance_filter(instance, network_info) self.firewall_driver.apply_instance_filter(instance, network_info)


timer = utils.LoopingCall(self._wait_for_running, instance) timer = utils.LoopingCall(self._wait_for_running, instance)
Expand All @@ -2310,7 +2299,7 @@ def finish_revert_migration(self, instance, network_info):
self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info)
# images already exist # images already exist
domain = self._create_new_domain(xml) self._create_new_domain(xml)
self.firewall_driver.apply_instance_filter(instance, network_info) self.firewall_driver.apply_instance_filter(instance, network_info)


timer = utils.LoopingCall(self._wait_for_running, instance) timer = utils.LoopingCall(self._wait_for_running, instance)
Expand Down

0 comments on commit e347d1a

Please sign in to comment.