Backslash continuations (nova.virt)
Fixes bug #925283

Removes backslash line continuations from the nova.virt package in favor of implicit continuation inside parentheses.

Change-Id: Ia47eae3b80d90cdac044e2f875d7ece8ed9ad715
Zhongyue Luo committed Feb 8, 2012
1 parent 27ac9d5 commit 5adaf44
Showing 21 changed files with 168 additions and 190 deletions.
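The pattern behind all 21 files is the same: statements that were wrapped with a trailing backslash are re-wrapped using Python's implicit line continuation inside parentheses (the form PEP 8 prefers), or split across a short intermediate variable. The sketch below is an editorial illustration of that pattern with invented names; it is not code from nova.virt.

# Editorial sketch of the style change (invented names, not nova code).
memory_total_mb = 4096
memory_used_mb = 1024

# Old style, removed by this commit: an explicit backslash continuation.
memory_free_old = memory_total_mb - \
    memory_used_mb

# New style, introduced by this commit: parentheses carry the continuation.
memory_free_new = (memory_total_mb -
                   memory_used_mb)

# Where parentheses do not wrap naturally, an intermediate name replaces a
# line ending in ".\" (compare the _nw and _image_service helpers in the
# hunks below).
_parts = "nova.virt.firewall".split(".")
package_name = _parts[-1]

assert memory_free_old == memory_free_new
print(package_name, memory_free_new)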
7 changes: 3 additions & 4 deletions nova/virt/firewall.py
@@ -28,10 +28,9 @@

LOG = logging.getLogger("nova.virt.firewall")

-allow_same_net_traffic_opt = \
-    cfg.BoolOpt('allow_same_net_traffic',
-                default=True,
-                help='Whether to allow network traffic from same network')
+allow_same_net_traffic_opt = cfg.BoolOpt('allow_same_net_traffic',
+        default=True,
+        help='Whether to allow network traffic from same network')

FLAGS = flags.FLAGS
FLAGS.add_option(allow_same_net_traffic_opt)
14 changes: 7 additions & 7 deletions nova/virt/libvirt/connection.py
@@ -251,9 +251,9 @@ def _test_connection(self):
self._wrapped_conn.getCapabilities()
return True
except libvirt.libvirtError as e:
-            if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \
-               e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
-                                        libvirt.VIR_FROM_RPC):
+            if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and
+                e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
+                                         libvirt.VIR_FROM_RPC)):
LOG.debug(_('Connection to libvirt broke'))
return False
raise
@@ -496,8 +496,8 @@ def snapshot(self, context, instance, image_href):
(image_service, image_id) = nova.image.get_image_service(
context, instance['image_ref'])
base = image_service.show(context, image_id)
-        (snapshot_image_service, snapshot_image_id) = \
-            nova.image.get_image_service(context, image_href)
+        _image_service = nova.image.get_image_service(context, image_href)
+        snapshot_image_service, snapshot_image_id = _image_service
snapshot = snapshot_image_service.show(context, snapshot_image_id)

metadata = {'is_public': False,
@@ -2037,8 +2037,8 @@ def update_status(self):
data["disk_used"] = self.connection.get_local_gb_used()
data["disk_available"] = data["disk_total"] - data["disk_used"]
data["host_memory_total"] = self.connection.get_memory_mb_total()
-        data["host_memory_free"] = data["host_memory_total"] - \
-            self.connection.get_memory_mb_used()
+        data["host_memory_free"] = (data["host_memory_total"] -
+                                    self.connection.get_memory_mb_used())
data["hypervisor_type"] = self.connection.get_hypervisor_type()
data["hypervisor_version"] = self.connection.get_hypervisor_version()

54 changes: 27 additions & 27 deletions nova/virt/libvirt/firewall.py
@@ -255,18 +255,19 @@ def unfilter_instance(self, instance, network_info):
instance_filter_name = self._instance_filter_name(instance, nic_id)

try:
-                self._conn.nwfilterLookupByName(instance_filter_name).\
-                        undefine()
+                _nw = self._conn.nwfilterLookupByName(instance_filter_name)
+                _nw.undefine()
except libvirt.libvirtError:
LOG.debug(_('The nwfilter(%(instance_filter_name)s) '
'for %(instance_name)s is not found.') % locals())

-        instance_secgroup_filter_name = \
-            '%s-secgroup' % (self._instance_filter_name(instance))
+        instance_secgroup_filter_name = ('%s-secgroup' %
+                                         self._instance_filter_name(instance))

try:
-            self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\
-                    .undefine()
+            _nw = self._conn.nwfilterLookupByName(
+                    instance_secgroup_filter_name)
+            _nw.undefine()
except libvirt.libvirtError:
LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) '
'for %(instance_name)s is not found.') % locals())
@@ -282,8 +283,8 @@ def prepare_instance_filter(self, instance, network_info):

ctxt = context.get_admin_context()

-        instance_secgroup_filter_name = \
-            '%s-secgroup' % (self._instance_filter_name(instance))
+        instance_secgroup_filter_name = ('%s-secgroup' %
+                                         self._instance_filter_name(instance))

instance_secgroup_filter_children = ['nova-base-ipv4',
'nova-base-ipv6',
@@ -294,11 +295,11 @@ def prepare_instance_filter(self, instance, network_info):
info['gateway_v6']]

if networks:
-            instance_secgroup_filter_children.\
-                append('nova-allow-ra-server')
+            instance_secgroup_filter_children.append(
+                    'nova-allow-ra-server')

-        for security_group in \
-                db.security_group_get_by_instance(ctxt, instance['id']):
+        for security_group in db.security_group_get_by_instance(ctxt,
+                                                        instance['id']):

self.refresh_security_group_rules(security_group['id'])

@@ -309,9 +310,8 @@ def prepare_instance_filter(self, instance, network_info):
self._filter_container(instance_secgroup_filter_name,
instance_secgroup_filter_children))

-        network_filters = self.\
-            _create_network_filters(instance, network_info,
-                                    instance_secgroup_filter_name)
+        network_filters = self._create_network_filters(instance, network_info,
+                                          instance_secgroup_filter_name)

for (name, children) in network_filters:
self._define_filters(name, children)
@@ -372,15 +372,15 @@ def security_group_to_nwfilter_xml(security_group_id):
version = netutils.get_ip_version(rule.cidr)
if(FLAGS.use_ipv6 and version == 6):
net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr)
-                    rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
-                                (v6protocol[rule.protocol], net, prefixlen)
+                    rule_xml += ("<%s srcipaddr='%s' srcipmask='%s' " %
+                                 (v6protocol[rule.protocol], net, prefixlen))
else:
net, mask = netutils.get_net_and_mask(rule.cidr)
-                    rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
-                                (rule.protocol, net, mask)
+                    rule_xml += ("<%s srcipaddr='%s' srcipmask='%s' " %
+                                 (rule.protocol, net, mask))
if rule.protocol in ['tcp', 'udp']:
-                    rule_xml += "dstportstart='%s' dstportend='%s' " % \
-                                (rule.from_port, rule.to_port)
+                    rule_xml += ("dstportstart='%s' dstportend='%s' " %
+                                 (rule.from_port, rule.to_port))
elif rule.protocol == 'icmp':
LOG.info('rule.protocol: %r, rule.from_port: %r, '
'rule.to_port: %r', rule.protocol,
@@ -410,15 +410,15 @@ def provider_fw_to_nwfilter_xml():
version = netutils.get_ip_version(rule.cidr)
if(FLAGS.use_ipv6 and version == 6):
net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr)
-                rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
-                            (v6protocol[rule.protocol], net, prefixlen)
+                rule_xml += ("<%s srcipaddr='%s' srcipmask='%s' " %
+                             (v6protocol[rule.protocol], net, prefixlen))
else:
net, mask = netutils.get_net_and_mask(rule.cidr)
-                rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
-                            (rule.protocol, net, mask)
+                rule_xml += ("<%s srcipaddr='%s' srcipmask='%s' " %
+                             (rule.protocol, net, mask))
if rule.protocol in ['tcp', 'udp']:
-                rule_xml += "dstportstart='%s' dstportend='%s' " % \
-                            (rule.from_port, rule.to_port)
+                rule_xml += ("dstportstart='%s' dstportend='%s' " %
+                             (rule.from_port, rule.to_port))
elif rule.protocol == 'icmp':
LOG.info('rule.protocol: %r, rule.from_port: %r, '
'rule.to_port: %r', rule.protocol,
7 changes: 3 additions & 4 deletions nova/virt/libvirt/utils.py
@@ -31,10 +31,9 @@
from nova.virt import images


-qemu_img_opt = \
-    cfg.StrOpt('qemu_img',
-               default='qemu-img',
-               help='binary to use for qemu-img commands')
+qemu_img_opt = cfg.StrOpt('qemu_img',
+        default='qemu-img',
+        help='binary to use for qemu-img commands')

FLAGS = flags.FLAGS
FLAGS.add_option(qemu_img_opt)
7 changes: 3 additions & 4 deletions nova/virt/libvirt/vif.py
@@ -31,10 +31,9 @@

LOG = logging.getLogger('nova.virt.libvirt.vif')

-libvirt_ovs_bridge_opt = \
-    cfg.StrOpt('libvirt_ovs_bridge',
-               default='br-int',
-               help='Name of Integration Bridge used by Open vSwitch')
+libvirt_ovs_bridge_opt = cfg.StrOpt('libvirt_ovs_bridge',
+        default='br-int',
+        help='Name of Integration Bridge used by Open vSwitch')

FLAGS = flags.FLAGS
FLAGS.add_option(libvirt_ovs_bridge_opt)
5 changes: 2 additions & 3 deletions nova/virt/vmwareapi/error_util.py
@@ -84,11 +84,10 @@ def retrieveproperties_fault_checker(resp_obj):
for obj_cont in resp_obj:
if hasattr(obj_cont, "missingSet"):
for missing_elem in obj_cont.missingSet:
-                        fault_type = \
-                            missing_elem.fault.fault.__class__.__name__
+                        fault_type = missing_elem.fault.fault.__class__
# Fault needs to be added to the type of fault for
# uniformity in error checking as SOAP faults define
-                        fault_list.append(fault_type)
+                        fault_list.append(fault_type.__name__)
if fault_list:
exc_msg_list = ', '.join(fault_list)
raise VimFaultException(fault_list, Exception(_("Error(s) %s "
10 changes: 4 additions & 6 deletions nova/virt/vmwareapi/fake.py
@@ -545,8 +545,7 @@ def _reconfig_vm(self, method, *args, **kwargs):
def _create_copy_disk(self, method, vmdk_file_path):
"""Creates/copies a vmdk file object in the datastore."""
# We need to add/create both .vmdk and .-flat.vmdk files
-        flat_vmdk_file_path = \
-            vmdk_file_path.replace(".vmdk", "-flat.vmdk")
+        flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
_add_file(vmdk_file_path)
_add_file(flat_vmdk_file_path)
task_mdo = create_task(method, "success")
@@ -560,8 +559,7 @@ def _snapshot_vm(self, method):
def _delete_disk(self, method, *args, **kwargs):
"""Deletes .vmdk and -flat.vmdk files corresponding to the VM."""
vmdk_file_path = kwargs.get("name")
-        flat_vmdk_file_path = \
-            vmdk_file_path.replace(".vmdk", "-flat.vmdk")
+        flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
_remove_file(vmdk_file_path)
_remove_file(flat_vmdk_file_path)
task_mdo = create_task(method, "success")
@@ -652,8 +650,8 @@ def _retrieve_properties(self, method, *args, **kwargs):

def _add_port_group(self, method, *args, **kwargs):
"""Adds a port group to the host system."""
-        host_mdo = \
-            _db_content["HostSystem"][_db_content["HostSystem"].keys()[0]]
+        _host_sk = _db_content["HostSystem"].keys()[0]
+        host_mdo = _db_content["HostSystem"][_host_sk]
host_mdo._add_port_group(kwargs.get("portgrp"))

def __getattr__(self, attr_name):
9 changes: 4 additions & 5 deletions nova/virt/vmwareapi/io_util.py
@@ -88,17 +88,16 @@ def _inner():
self._running = True
while self._running:
try:
-                    image_status = \
-                        self.glance_client.get_image_meta(self.image_id).get(
-                            "status")
+                    _get_image_meta = self.glance_client.get_image_meta
+                    image_status = _get_image_meta(self.image_id).get("status")
if image_status == "active":
self.stop()
self.done.send(True)
# If the state is killed, then raise an exception.
elif image_status == "killed":
self.stop()
-                        exc_msg = _("Glance image %s is in killed state") %\
-                            self.image_id
+                        exc_msg = (_("Glance image %s is in killed state") %
+                                   self.image_id)
LOG.exception(exc_msg)
self.done.send_exception(exception.Error(exc_msg))
elif image_status in ["saving", "queued"]:
4 changes: 2 additions & 2 deletions nova/virt/vmwareapi/read_write_util.py
@@ -124,8 +124,8 @@ def __init__(self, host, data_center_name, datastore_name, cookies,
base_url = "%s://%s/folder/%s" % (scheme, host, file_path)
param_list = {"dcPath": data_center_name, "dsName": datastore_name}
base_url = base_url + "?" + urllib.urlencode(param_list)
-        (scheme, netloc, path, params, query, fragment) = \
-            urlparse.urlparse(base_url)
+        _urlparse = urlparse.urlparse(base_url)
+        scheme, netloc, path, params, query, fragment = _urlparse
if scheme == "http":
conn = httplib.HTTPConnection(netloc)
elif scheme == "https":
5 changes: 2 additions & 3 deletions nova/virt/vmwareapi/vif.py
@@ -69,9 +69,8 @@ def ensure_vlan_bridge(self, session, network):
vswitch_associated, vlan_num)
else:
# Get the vlan id and vswitch corresponding to the port group
-            pg_vlanid, pg_vswitch = \
-                network_utils.get_vlanid_and_vswitch_for_portgroup(session,
-                                                                   bridge)
+            _get_pg_info = network_utils.get_vlanid_and_vswitch_for_portgroup
+            pg_vlanid, pg_vswitch = _get_pg_info(session, bridge)

# Check if the vswitch associated is proper
if pg_vswitch != vswitch_associated:
20 changes: 9 additions & 11 deletions nova/virt/vmwareapi/vim.py
@@ -34,13 +34,12 @@
CONN_ABORT_ERROR = 'Software caused connection abort'
ADDRESS_IN_USE_ERROR = 'Address already in use'

-vmwareapi_wsdl_loc_opt = \
-    cfg.StrOpt('vmwareapi_wsdl_loc',
-               default=None,
-               help='VIM Service WSDL Location '
-                    'e.g http://<server>/vimService.wsdl. '
-                    'Due to a bug in vSphere ESX 4.1 default wsdl. '
-                    'Refer readme-vmware to setup')
+vmwareapi_wsdl_loc_opt = cfg.StrOpt('vmwareapi_wsdl_loc',
+        default=None,
+        help='VIM Service WSDL Location '
+             'e.g http://<server>/vimService.wsdl. '
+             'Due to a bug in vSphere ESX 4.1 default wsdl. '
+             'Refer readme-vmware to setup')

FLAGS = flags.FLAGS
FLAGS.add_option(vmwareapi_wsdl_loc_opt)
@@ -95,8 +94,7 @@ def __init__(self,
url = '%s://%s/sdk' % (self._protocol, self._host_name)
self.client = suds.client.Client(wsdl_url, location=url,
plugins=[VIMMessagePlugin()])
-        self._service_content = \
-            self.RetrieveServiceContent("ServiceInstance")
+        self._service_content = self.RetrieveServiceContent("ServiceInstance")

def get_service_content(self):
"""Gets the service content object."""
@@ -119,8 +117,8 @@ def vim_request_handler(managed_object, **kwargs):
"""
# Dynamic handler for VI SDK Calls
try:
-                    request_mo = \
-                        self._request_managed_object_builder(managed_object)
+                    request_mo = self._request_managed_object_builder(
+                        managed_object)
request = getattr(self.client.service, attr_name)
response = request(request_mo, **kwargs)
# To check for the faults that are part of the message body
6 changes: 2 additions & 4 deletions nova/virt/vmwareapi/vim_util.py
@@ -148,8 +148,7 @@ def get_object_properties(vim, collector, mobj, type, properties):

def get_dynamic_property(vim, mobj, type, property_name):
"""Gets a particular property of the Managed Object."""
-    obj_content = \
-        get_object_properties(vim, None, mobj, type, [property_name])
+    obj_content = get_object_properties(vim, None, mobj, type, [property_name])
property_value = None
if obj_content:
dynamic_property = obj_content[0].propSet
@@ -197,8 +196,7 @@ def get_obj_spec(client_factory, obj, select_set=None):

def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
"""Builds the Property Filter Spec Object."""
-    prop_filter_spec = \
-        client_factory.create('ns0:PropertyFilterSpec')
+    prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
prop_filter_spec.propSet = prop_spec
prop_filter_spec.objectSet = obj_spec
return prop_filter_spec