octavia: Make Octavia ready devstack
This patch changes the main sample devstack local.conf to use Octavia.
In order for that to work, it makes some security group changes to ensure
that the communication from the LB to the members works in L3 mode.

In L2 mode, which will be added at some point after this patch, Octavia
creates a pod_subnet port for each Load Balancer with the 'default'
security group of the 'admin' project. This means that its traffic would
not be allowed by the members, since they use the 'default' security
group from the 'k8s' project.

In L3 mode, Octavia does not create a port in the members' subnet and
relies on the service and the pod subnets being connected to the same
router. Some changes were necessary in the lbaas handler for that,
specifically changing the member subnet to be the service subnet so that
Octavia does not go into L2 mode.

Implements: blueprint octavia-support
Change-Id: I993ebb0d7b82ad1140d752982013bbadf35dfef7
Closes-Bug: #1707180
Signed-off-by: Antoni Segura Puimedon <antonisp@celebdor.com>
celebdor committed Aug 2, 2017
1 parent 3cba3eb commit ed1436f
Showing 6 changed files with 120 additions and 85 deletions.
55 changes: 50 additions & 5 deletions devstack/local.conf.sample
@@ -35,11 +35,44 @@ enable_service q-dhcp
enable_service q-l3
enable_service q-svc

# LBaaSv2 service and Haproxy agent
enable_plugin neutron-lbaas \
git://git.openstack.org/openstack/neutron-lbaas
enable_service q-lbaasv2
NEUTRON_LBAAS_SERVICE_PROVIDERV2="LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default"
KURYR_K8S_LBAAS_USE_OCTAVIA=True

if [[ "$KURYR_K8S_LBAAS_USE_OCTAVIA" == "True" ]]; then
# Octavia LBaaSv2
LIBS_FROM_GIT+=python-octaviaclient
enable_plugin octavia https://git.openstack.org/openstack/octavia
enable_service octavia
enable_service o-api
enable_service o-cw
enable_service o-hm
enable_service o-hk
## Octavia Deps
### Image
### Barbican
enable_plugin barbican https://git.openstack.org/openstack/barbican
### Nova
enable_service n-api
enable_service n-api-meta
enable_service n-cpu
enable_service n-cond
enable_service n-sch
enable_service placement-api
enable_service placement-client
### Glance
enable_service g-api
enable_service g-reg
### Neutron-lbaas
#### In case Octavia is older than Pike, neutron-lbaas is needed
enable_plugin neutron-lbaas \
git://git.openstack.org/openstack/neutron-lbaas
enable_service q-lbaasv2
else
# LBaaSv2 service and Haproxy agent
enable_plugin neutron-lbaas \
git://git.openstack.org/openstack/neutron-lbaas
enable_service q-lbaasv2
fi


# Keystone
enable_service key
@@ -139,3 +172,15 @@ enable_service kubelet
# part of the codebase that connects to the Kubernetes API server to read the
# resource events and convert them to Neutron actions
enable_service kuryr-kubernetes

# When Octavia is enabled, pre-cache a Cirros image; otherwise configure the
# Haproxy provider for neutron-lbaas
if [[ "$KURYR_K8S_LBAAS_USE_OCTAVIA" == "True" ]]; then
IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
else
NEUTRON_LBAAS_SERVICE_PROVIDERV2="LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default"
fi

# Increase the Octavia amphorae timeout so that the first LB amphora has time
# to build and boot
[[post-config|$OCTAVIA_CONF_DIR/octavia.conf]]
[controller_worker]
amp_active_retries=9999
28 changes: 25 additions & 3 deletions devstack/plugin.sh
@@ -206,9 +206,6 @@ function configure_neutron_defaults {
pod_subnet_id="$(neutron subnet-show -c id -f value \
"${KURYR_NEUTRON_DEFAULT_POD_SUBNET}")"

sg_ids=$(echo $(neutron security-group-list \
--project-id "$project_id" -c id -f value) | tr ' ' ',')

create_k8s_subnet "$project_id" \
"$KURYR_NEUTRON_DEFAULT_SERVICE_NET" \
"$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" \
@@ -217,6 +214,31 @@
service_subnet_id="$(neutron subnet-show -c id -f value \
"${KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET}")"

sg_ids=$(echo $(neutron security-group-list \
--project-id "$project_id" -c id -f value) | tr ' ' ',')

local use_octavia
use_octavia=$(trueorfalse True KURYR_K8S_LBAAS_USE_OCTAVIA)
if [[ "$use_octavia" == "True" ]]; then
# In order for the pods to allow service traffic under Octavia L3 mode,
# it is necessary for the service subnet to be allowed into the $sg_ids
local service_cidr
local service_pod_access_sg_id
service_cidr=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" subnet show \
"${KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET}" -f value -c cidr)
service_pod_access_sg_id=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
security group create --project "$project_id" \
service_pod_access -f value -c id)
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
security group rule create --project "$project_id" \
--description "k8s service subnet allowed" \
--remote-ip "$service_cidr" --ethertype IPv4 --protocol tcp \
"$service_pod_access_sg_id"
sg_ids+=",${service_pod_access_sg_id}"
fi

iniset "$KURYR_CONFIG" neutron_defaults project "$project_id"
iniset "$KURYR_CONFIG" neutron_defaults pod_subnet "$pod_subnet_id"
iniset "$KURYR_CONFIG" neutron_defaults pod_security_groups "$sg_ids"
4 changes: 4 additions & 0 deletions devstack/settings
@@ -41,5 +41,9 @@ KURYR_K8S_API_CERT=${KURYR_K8S_API_CERT:-}
KURYR_K8S_API_KEY=${KURYR_K8S_API_KEY:-}
KURYR_K8S_API_CACERT=${KURYR_K8S_API_CACERT:-}

# Octavia
KURYR_K8S_LBAAS_USE_OCTAVIA=${KURYR_K8S_LBAAS_USE_OCTAVIA:-True}
KURYR_K8S_OCTAVIA_MEMBER_MODE=${KURYR_K8S_OCTAVIA_MEMBER_MODE:-L3}

# Kuryr_ovs_baremetal
KURYR_CONFIGURE_BAREMETAL_KUBELET_IFACE=${KURYR_CONFIGURE_BAREMETAL_KUBELET_IFACE:-True}
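
As with any devstack setting, the Octavia defaults above can be overridden
from ``local.conf``; for example, to fall back to the Haproxy-based
Neutron-LBaaSv2 (a sketch):

    [[local|localrc]]
    KURYR_K8S_LBAAS_USE_OCTAVIA=False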
32 changes: 32 additions & 0 deletions doc/source/installation/manual.rst
@@ -51,6 +51,38 @@ Edit ``kuryr.conf``::
project = {id_of_project}
service_subnet = {id_of_subnet_for_k8s_services}

Note that the service_subnet and the pod_subnet *should be routable* to each
other and that the pods should allow traffic coming from the service subnet.

Octavia supports two ways of performing the load balancing between the
Kubernetes load balancers and their members:

* Layer2: Apart from the VIP port in the services subnet, Octavia creates a
Neutron port in the subnet of each of the members. This way the traffic from
the service Haproxy to the members does not go through the router again; it
only goes through the router once, on its way to the service.
* Layer3: Octavia only creates the VIP port. The traffic from the service VIP
to the members goes back through the router to reach the pod subnet. It is
important to note that this will have some performance impact depending on
the SDN (a minimal router sketch follows this list).
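
As a sketch of the routing prerequisite this implies (router and subnet names
here are hypothetical), connecting both subnets to a single router could look
like::

    openstack router create k8s_router
    openstack router add subnet k8s_router pod_subnet
    openstack router add subnet k8s_router service_subnet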

At the moment Kuryr-Kubernetes supports only L3 mode (both for Octavia and for
the deprecated Neutron-LBaaSv2).

This means that:

* There should be a router between the two subnets.
* The pod_security_groups setting should include a security group with a rule
granting access to the whole CIDR of the service subnet, e.g.::

openstack security group create --project k8s_cluster_project \
service_pod_access_sg
openstack security group rule create --project k8s_cluster_project \
--remote-ip cidr_of_service_subnet --ethertype IPv4 --protocol tcp \
service_pod_access_sg

* The UUID of this security group should be added to the comma-separated
*pod_security_groups* list in the *[neutron_defaults]* section.
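
For example (UUIDs are placeholders), the resulting ``kuryr.conf`` entry
could look like::

    [neutron_defaults]
    pod_security_groups = <default_sg_uuid>,<service_pod_access_sg_uuid>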

Run kuryr-k8s-controller::

$ kuryr-k8s-controller --config-file /etc/kuryr/kuryr.conf -d
23 changes: 8 additions & 15 deletions kuryr_kubernetes/controller/handlers/lbaas.py
@@ -300,13 +300,18 @@ def _add_new_members(self, endpoints, lbaas_state, lbaas_spec):
continue
port_name = subset_port.get('name')
pool = pool_by_tgt_name[port_name]
target_subnet_id = self._get_pod_subnet(target_ref,
target_ip)
# We use the service subnet id so that the connectivity
# from VIP to pods happens in layer 3 mode, i.e., routed.
# TODO(apuimedo): Add L2 mode
# TODO(apuimedo): Do not pass subnet_id at all when in
# L3 mode once old neutron-lbaasv2 is not supported, as
# octavia does not require it
member_subnet_id = lbaas_state.loadbalancer.subnet_id
member = self._drv_lbaas.ensure_member(
endpoints=endpoints,
loadbalancer=lbaas_state.loadbalancer,
pool=pool,
subnet_id=target_subnet_id,
subnet_id=member_subnet_id,
ip=target_ip,
port=target_port,
target_ref=target_ref)
@@ -315,18 +320,6 @@ def _add_new_members(self, endpoints, lbaas_state, lbaas_spec):

return changed

def _get_pod_subnet(self, target_ref, ip):
# REVISIT(ivc): consider using true pod object instead
pod = {'kind': target_ref['kind'],
'metadata': {'name': target_ref['name'],
'namespace': target_ref['namespace']}}
project_id = self._drv_pod_project.get_project(pod)
subnets_map = self._drv_pod_subnets.get_subnets(pod, project_id)
# FIXME(ivc): potentially unsafe [0] index
return [subnet_id for subnet_id, network in subnets_map.items()
for subnet in network.subnets.objects
if ip in subnet.cidr][0]

def _remove_unused_members(self, endpoints, lbaas_state, lbaas_spec):
spec_port_names = {p.name for p in lbaas_spec.ports}
current_targets = {(a['ip'], p['port'])
63 changes: 1 addition & 62 deletions kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py
@@ -118,43 +118,6 @@ def _make_test_net_obj(self, cidr_list):
subnets_list = osv_subnet.SubnetList(objects=subnets)
return osv_network.Network(subnets=subnets_list)

def test_get_subnet_id(self):
test_ip = '1.2.3.4'
test_cidr = '1.2.3.0/24'
m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
m_drv_subnets = mock.Mock(spec=drv_base.ServiceSubnetsDriver)
m_handler._drv_subnets = m_drv_subnets
m_drv_subnets.get_subnets.return_value = {
mock.sentinel.subnet_id: self._make_test_net_obj([test_cidr])
}

self.assertEqual(mock.sentinel.subnet_id,
h_lbaas.LBaaSSpecHandler._get_subnet_id(
m_handler,
mock.sentinel.service,
mock.sentinel.project_id,
test_ip))
m_drv_subnets.get_subnets.assert_called_once_with(
mock.sentinel.service, mock.sentinel.project_id)

def test_get_subnet_id_invalid(self):
test_ip = '1.2.3.4'
test_cidr = '3.2.1.0/24'
m_service = mock.MagicMock()
m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
m_drv_subnets = mock.Mock(spec=drv_base.ServiceSubnetsDriver)
m_handler._drv_subnets = m_drv_subnets
m_drv_subnets.get_subnets.return_value = {
mock.sentinel.subnet_id: self._make_test_net_obj([test_cidr])
}

self.assertRaises(k_exc.IntegrityError,
h_lbaas.LBaaSSpecHandler._get_subnet_id,
m_handler,
m_service,
mock.sentinel.project_id,
test_ip)

def test_generate_lbaas_spec(self):
m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)

@@ -494,28 +457,6 @@ def test_has_pods(self):

self.assertEqual(True, ret)

def test_get_pod_subnet(self):
subnet_id = mock.sentinel.subnet_id
project_id = mock.sentinel.project_id
target_ref = {'kind': k_const.K8S_OBJ_POD,
'name': 'pod-name',
'namespace': 'default'}
ip = '1.2.3.4'
m_handler = mock.Mock(spec=h_lbaas.LoadBalancerHandler)
m_drv_pod_project = mock.Mock()
m_drv_pod_project.get_project.return_value = project_id
m_handler._drv_pod_project = m_drv_pod_project
m_drv_pod_subnets = mock.Mock()
m_drv_pod_subnets.get_subnets.return_value = {
subnet_id: osv_network.Network(subnets=osv_subnet.SubnetList(
objects=[osv_subnet.Subnet(cidr='1.2.3.0/24')]))}
m_handler._drv_pod_subnets = m_drv_pod_subnets

observed_subnet_id = h_lbaas.LoadBalancerHandler._get_pod_subnet(
m_handler, target_ref, ip)

self.assertEqual(subnet_id, observed_subnet_id)

def _generate_lbaas_state(self, vip, targets, project_id, subnet_id):
endpoints = mock.sentinel.endpoints
drv = FakeLBaaSDriver()
@@ -616,9 +557,7 @@ def test_sync_lbaas_members(self, m_get_drv_lbaas, m_get_drv_project,

handler = h_lbaas.LoadBalancerHandler()

with mock.patch.object(handler, '_get_pod_subnet') as m_get_pod_subnet:
m_get_pod_subnet.return_value = subnet_id
handler._sync_lbaas_members(endpoints, state, spec)
handler._sync_lbaas_members(endpoints, state, spec)

lsnrs = {lsnr.id: lsnr for lsnr in state.listeners}
pools = {pool.id: pool for pool in state.pools}
