Metadata support for NVP plugin
Bug #1121119

Allows the NVP plugin to leverage the metadata proxy by creating an
ad-hoc topology that allows access to a metadata proxy from an NVP
router, leveraging existing agents.

This patch also removes the previous code for metadata support in the
NVP plugin, which was based on DHCP Option 121; that functionality is
now provided by the dhcp agent as well.

Change-Id: If37ef388e063f40bb06908ee2f72c431f29dac31
salv-orlando committed Feb 18, 2013
1 parent f8ccfc5 commit 24c7796
Showing 9 changed files with 438 additions and 84 deletions.
17 changes: 12 additions & 5 deletions etc/dhcp_agent.ini
@@ -30,9 +30,16 @@ dhcp_driver = quantum.agent.linux.dhcp.Dnsmasq
# iproute2 package that supports namespaces).
# use_namespaces = True

# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet gateway_ip is None. The guest instance must
# be configured to request host routes via DHCP (Option 121).
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet gateway_ip is None. The guest instance must
# be configured to request host routes via DHCP (Option 121).
# enable_isolated_metadata = False
# Allows for serving metadata requests coming from a dedicated metadata
# access network whose CIDR is 169.254.169.254/16 (or a larger prefix),
# and which is connected to a Quantum router through which the VMs send
# metadata requests. In this case DHCP Option 121 will not be injected
# into VMs, as they will be able to reach 169.254.169.254 through a
# router. This option requires enable_isolated_metadata = True.
# enable_metadata_network = False
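
For illustration, a minimal sketch (not from the patch) of the containment
test the dhcp agent uses to recognize such a metadata access network; only
netaddr and the 169.254.169.254/16 prefix come from this commit, the helper
name is made up. Note that enable_isolated_metadata = True must be set
alongside enable_metadata_network = True for the feature to activate.

    import netaddr

    METADATA_DEFAULT_IP = '169.254.169.254/16'

    def is_metadata_access_subnet(cidr):
        # A subnet qualifies when it falls inside 169.254.0.0/16, the
        # link-local range containing the metadata address.
        return (netaddr.IPNetwork(cidr) in
                netaddr.IPNetwork(METADATA_DEFAULT_IP))

    print(is_metadata_access_subnet('169.254.169.252/30'))  # True
    print(is_metadata_access_subnet('10.0.0.0/24'))         # False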
10 changes: 3 additions & 7 deletions etc/quantum/plugins/nicira/nvp.ini
@@ -1,10 +1,3 @@
[DEFAULT]

# The following flag will cause a host route to the metadata server
# to be injected into instances. The metadata server will be reached
# via the dhcp server.
metadata_dhcp_host_route = False

[DATABASE]
# This line MUST be changed to actually run the plugin.
# Example:
@@ -39,6 +32,9 @@ reconnect_interval = 2
# is not specified. If it is empty or references a non-existent cluster
# the first cluster specified in this configuration file will be used
# default_cluster_name =
# The following flag enables the creation of a dedicated connection
# to the metadata proxy for metadata server access via a Quantum router
# enable_metadata_access_network = True

#[CLUSTER:example]
# This is uuid of the default NVP Transport zone that will be used for
55 changes: 50 additions & 5 deletions quantum/agent/dhcp_agent.py
@@ -29,6 +29,7 @@
from quantum.agent.linux import interface
from quantum.agent.linux import ip_lib
from quantum.agent import rpc as agent_rpc
from quantum.common import constants
from quantum.common import exceptions
from quantum.common import topics
from quantum import context
@@ -40,7 +41,8 @@

LOG = logging.getLogger(__name__)
NS_PREFIX = 'qdhcp-'
METADATA_DEFAULT_IP = '169.254.169.254/16'
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254/%d' % METADATA_DEFAULT_PREFIX
METADATA_PORT = 80


@@ -54,7 +56,11 @@ class DhcpAgent(object):
cfg.BoolOpt('use_namespaces', default=True,
help=_("Allow overlapping IP.")),
cfg.BoolOpt('enable_isolated_metadata', default=False,
help=_("Support Metadata requests on isolated networks."))
help=_("Support Metadata requests on isolated networks.")),
cfg.BoolOpt('enable_metadata_network', default=False,
help=_("Allows for serving metadata requests from a "
"dedicate network. Requires "
"enable isolated_metadata = True "))
]

def __init__(self, conf):
@@ -245,13 +251,37 @@ def port_delete_end(self, payload):
self.call_driver('reload_allocations', network)

def enable_isolated_metadata_proxy(self, network):

# The proxy can serve either a single network or all the
# networks connected via a router to the one passed as a
# parameter
quantum_lookup_param = '--network_id=%s' % network.id
meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_IP)
has_metadata_subnet = any(netaddr.IPNetwork(s.cidr) in meta_cidr
for s in network.subnets)
if (self.conf.enable_metadata_network and has_metadata_subnet):
router_ports = [port for port in network.ports
if (port.device_owner ==
constants.DEVICE_OWNER_ROUTER_INTF)]
if router_ports:
# Multiple router ports should not be allowed
if len(router_ports) > 1:
LOG.warning(_("%(port_num)d router ports found on the "
"metadata access network. Only the port "
"%(port_id)s, for router %(router_id)s "
"will be considered"),
{'port_num': len(router_ports),
'port_id': router_ports[0].id,
'router_id': router_ports[0].device_id})
quantum_lookup_param = ('--router_id=%s' %
router_ports[0].device_id)

def callback(pid_file):
return ['quantum-ns-metadata-proxy',
'--pid_file=%s' % pid_file,
'--network_id=%s' % network.id,
quantum_lookup_param,
'--state_path=%s' % self.conf.state_path,
'--metadata_port=%d' % METADATA_PORT]

pm = external_process.ProcessManager(
self.conf,
network.id,
@@ -480,7 +510,9 @@ def setup(self, network, reuse_existing=False):
ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
ip_cidrs.append(ip_cidr)

if self.conf.enable_isolated_metadata and self.conf.use_namespaces:
if (self.conf.enable_isolated_metadata and
self.conf.use_namespaces and
not self.conf.enable_metadata_network):
ip_cidrs.append(METADATA_DEFAULT_IP)

self.driver.init_l3(interface_name, ip_cidrs,
@@ -492,6 +524,19 @@ def setup(self, network, reuse_existing=False):
self.root_helper)
device.route.pullup_route(interface_name)

if self.conf.enable_metadata_network:
meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_IP)
metadata_subnets = [s for s in network.subnets if
netaddr.IPNetwork(s.cidr) in meta_cidr]
if metadata_subnets:
# Add a gateway so that packets can be routed back to VMs
device = ip_lib.IPDevice(interface_name,
self.root_helper,
namespace)
# Only 1 subnet on metadata access network
gateway_ip = metadata_subnets[0].gateway_ip
device.route.add_gateway(gateway_ip)

return interface_name

def destroy(self, network, device_name):
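
To summarize the changes above: a condensed sketch of how
enable_isolated_metadata_proxy now chooses the proxy's lookup parameter
(distilled from the hunk above; the attribute access on network mirrors
the agent's model objects, and DEVICE_OWNER_ROUTER_INTF is assumed to be
the usual 'network:router_interface' constant).

    import netaddr

    DEVICE_OWNER_ROUTER_INTF = 'network:router_interface'
    METADATA_DEFAULT_IP = '169.254.169.254/16'
    METADATA_PORT = 80

    def build_proxy_command(network, pid_file, state_path,
                            enable_metadata_network=False):
        # Default: the proxy serves the single, isolated network.
        lookup_param = '--network_id=%s' % network.id
        meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_IP)
        if enable_metadata_network and any(
                netaddr.IPNetwork(s.cidr) in meta_cidr
                for s in network.subnets):
            router_ports = [p for p in network.ports
                            if p.device_owner == DEVICE_OWNER_ROUTER_INTF]
            if router_ports:
                # Serve every network behind the router instead; the
                # agent warns when more than one router port is found
                # and uses the first.
                lookup_param = '--router_id=%s' % router_ports[0].device_id
        return ['quantum-ns-metadata-proxy',
                '--pid_file=%s' % pid_file,
                lookup_param,
                '--state_path=%s' % state_path,
                '--metadata_port=%d' % METADATA_PORT]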
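
The setup() change, in turn, adds a default gateway inside the qdhcp-
namespace so that replies from the proxy can be routed back to the VMs.
A rough shell-level equivalent of the device.route.add_gateway() call
(namespace, device and gateway values are placeholders):

    import subprocess

    def add_namespace_gateway(namespace, device, gateway_ip):
        # Roughly what ip_lib does under the hood for add_gateway().
        subprocess.check_call(
            ['ip', 'netns', 'exec', namespace,
             'ip', 'route', 'replace', 'default',
             'via', gateway_ip, 'dev', device])

    # e.g. add_namespace_gateway('qdhcp-<network-id>', 'tap0', '169.254.0.1')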
63 changes: 20 additions & 43 deletions quantum/plugins/nicira/nicira_nvp_plugin/QuantumPlugin.py
@@ -47,6 +47,8 @@
from quantum.extensions import providernet as pnet
from quantum.extensions import securitygroup as ext_sg
from quantum.openstack.common import rpc
from quantum.plugins.nicira.nicira_nvp_plugin.common import (metadata_access
as nvp_meta)
from quantum.plugins.nicira.nicira_nvp_plugin.common import (securitygroups
as nvp_sec)
from quantum import policy
@@ -84,7 +86,11 @@ def parse_config():
NVPCluster objects, 'plugin_config' is a dictionary with plugin
parameters (currently only 'max_lp_per_bridged_ls').
"""
nvp_options = cfg.CONF.NVP
# Warn if metadata_dhcp_host_route option is specified
if cfg.CONF.metadata_dhcp_host_route:
LOG.warning(_("The metadata_dhcp_host_route is now obsolete, and "
"will have no effect. Instead, please set the "
"enable_isolated_metadata option in dhcp_agent.ini"))
nvp_conf = config.ClusterConfigOptions(cfg.CONF)
cluster_names = config.register_cluster_groups(nvp_conf)
nvp_conf.log_opt_values(LOG, logging.DEBUG)
@@ -104,7 +110,7 @@ def parse_config():
'default_l3_gw_service_uuid':
nvp_conf[cluster_name].default_l3_gw_service_uuid})
LOG.debug(_("Cluster options:%s"), clusters_options)
return nvp_options, clusters_options
return cfg.CONF.NVP, clusters_options


class NVPRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin):
@@ -125,7 +131,9 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
l3_db.L3_NAT_db_mixin,
portsecurity_db.PortSecurityDbMixin,
securitygroups_db.SecurityGroupDbMixin,
nvp_sec.NVPSecurityGroups, qos_db.NVPQoSDbMixin):
nvp_sec.NVPSecurityGroups,
qos_db.NVPQoSDbMixin,
nvp_meta.NvpMetadataAccess):
"""
NvpPluginV2 is a Quantum plugin that provides L2 Virtual Network
functionality using NVP.
@@ -671,26 +679,6 @@ def _handle_lswitch_selection(self, cluster, network,
"logical network %s"), network.id)
raise nvp_exc.NvpNoMorePortsException(network=network.id)

def _ensure_metadata_host_route(self, context, fixed_ip_data,
is_delete=False):
subnet = self._get_subnet(context, fixed_ip_data['subnet_id'])
metadata_routes = [r for r in subnet.routes
if r['destination'] == '169.254.169.254/32']
if metadata_routes:
# We should have only a single metadata route at any time
# because the route logic forbids two routes with the same
# destination. Update next hop with the provided IP address
if not is_delete:
metadata_routes[0].nexthop = fixed_ip_data['ip_address']
else:
context.session.delete(metadata_routes[0])
else:
# add the metadata route
route = models_v2.Route(subnet_id=subnet.id,
destination='169.254.169.254/32',
nexthop=fixed_ip_data['ip_address'])
context.session.add(route)

def setup_rpc(self):
# RPC support for dhcp
self.topic = topics.PLUGIN
@@ -1100,16 +1088,6 @@ def create_port(self, context, port):
with context.session.begin(subtransactions=True):
# First we allocate port in quantum database
quantum_db = super(NvpPluginV2, self).create_port(context, port)
# If we have just created a dhcp port, and metadata requests are
# forwarded there, we need to verify the appropriate host route is
# in place
if (cfg.CONF.metadata_dhcp_host_route and
(quantum_db.get('device_owner') ==
constants.DEVICE_OWNER_DHCP)):
if (quantum_db.get('fixed_ips') and
len(quantum_db.get('fixed_ips'))):
self._ensure_metadata_host_route(
context, quantum_db.get('fixed_ips')[0])
# Update fields obtained from quantum db (eg: MAC address)
port["port"].update(quantum_db)
# port security extension checks
@@ -1172,16 +1150,6 @@ def update_port(self, context, id, port):
# copy values over
ret_port.update(port['port'])

# TODO(salvatore-orlando): We might need transaction management
# but the change for metadata support should not be too disruptive
fixed_ip_data = port['port'].get('fixed_ips')
if (cfg.CONF.metadata_dhcp_host_route and
ret_port.get('device_owner') == constants.DEVICE_OWNER_DHCP
and fixed_ip_data):
self._ensure_metadata_host_route(context,
fixed_ip_data[0],
is_delete=True)

# populate port_security setting
if psec.PORTSECURITY not in port['port']:
ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
@@ -1526,6 +1494,10 @@ def add_router_interface(self, context, router_id, interface_info):
order=NVP_EXTGW_NAT_RULES_ORDER,
match_criteria={'source_ip_addresses': subnet['cidr']})

# Ensure the NVP logical router has a connection to a 'metadata access'
# network (with a proxy listening on its DHCP port), by creating it
# if needed.
self._handle_metadata_access_network(context, router_id)
LOG.debug(_("Add_router_interface completed for subnet:%(subnet_id)s "
"and router:%(router_id)s"),
{'subnet_id': subnet_id, 'router_id': router_id})
@@ -1585,6 +1557,11 @@ def remove_router_interface(self, context, router_id, interface_info):
{'q_port_id': port_id,
'nvp_port_id': lport['uuid']})
return

# Ensure the connection to the 'metadata access network'
# is removed (with the network) if this is the last subnet
# on the router
self._handle_metadata_access_network(context, router_id)
try:
if not subnet:
subnet = self._get_subnet(context, subnet_id)
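
The _handle_metadata_access_network helper called in both hunks lives in
the new metadata_access module added by this commit and is not shown in
this excerpt. A hypothetical, heavily simplified sketch of the decision
it has to make, based only on the comments above (the three callables
are illustrative stand-ins, not plugin API):

    DEVICE_OWNER_ROUTER_INTF = 'network:router_interface'

    def handle_metadata_access_network(router_id, list_router_ports,
                                       create_access_network,
                                       destroy_access_network):
        # Ports binding tenant subnets to this router; a dedicated
        # metadata access port would be excluded from the count.
        tenant_ports = [p for p in list_router_ports(router_id)
                        if p['device_owner'] == DEVICE_OWNER_ROUTER_INTF
                        and not p.get('is_metadata_access_port')]
        if tenant_ports:
            # Ensure the metadata access network is plugged into the
            # router (created on the first interface, idempotent after).
            create_access_network(router_id)
        else:
            # Last subnet removed: no VM can reach the proxy any more,
            # so the access network is torn down with it.
            destroy_access_network(router_id)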
3 changes: 3 additions & 0 deletions quantum/plugins/nicira/nicira_nvp_plugin/common/config.py
@@ -34,6 +34,9 @@
"(default -1 meaning do not time out)")),
cfg.StrOpt('default_cluster_name',
help=_("Default cluster name")),
cfg.BoolOpt('enable_metadata_access_network', default=True,
help=_("Enables dedicated connection to the metadata proxy "
"for metadata server access via Quantum router")),
]

cluster_opts = [
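
A minimal sketch of reading the new flag back, assuming the oslo-style
cfg module used above (grizzly-era import path; the empty-list call just
parses no CLI arguments so the default applies):

    from oslo.config import cfg  # newer releases: from oslo_config import cfg

    cfg.CONF.register_opts(
        [cfg.BoolOpt('enable_metadata_access_network', default=True)],
        group='NVP')
    cfg.CONF([])
    print(cfg.CONF.NVP.enable_metadata_access_network)  # True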
