diff --git a/.zuul.d/octavia.yaml b/.zuul.d/octavia.yaml
index de1d2d9bc..6c0b4bade 100644
--- a/.zuul.d/octavia.yaml
+++ b/.zuul.d/octavia.yaml
@@ -99,7 +99,7 @@
vars:
devstack_localrc:
DOCKER_CGROUP_DRIVER: "systemd"
- KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork
+ KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
KURYR_SG_DRIVER: policy
KURYR_SUBNET_DRIVER: namespace
devstack_services:
@@ -120,7 +120,7 @@
vars:
devstack_localrc:
KURYR_SUBNET_DRIVER: namespace
- KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork
+ KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
KURYR_SG_DRIVER: policy
KURYR_USE_PORT_POOLS: true
KURYR_POD_VIF_DRIVER: neutron-vif
@@ -134,7 +134,7 @@
parent: kuryr-kubernetes-tempest-containerized
vars:
devstack_localrc:
- KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork
+ KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
KURYR_SG_DRIVER: policy
KURYR_SUBNET_DRIVER: namespace
diff --git a/.zuul.d/sdn.yaml b/.zuul.d/sdn.yaml
index d38bb589e..184cb1b61 100644
--- a/.zuul.d/sdn.yaml
+++ b/.zuul.d/sdn.yaml
@@ -98,7 +98,7 @@
KURYR_LB_ALGORITHM: SOURCE_IP_PORT
KURYR_SUBNET_DRIVER: namespace
KURYR_SG_DRIVER: policy
- KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork
+ KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
voting: false
- job:
@@ -144,7 +144,7 @@
KURYR_ENFORCE_SG_RULES: false
KURYR_LB_ALGORITHM: SOURCE_IP_PORT
KURYR_HYPERKUBE_VERSION: v1.16.0
- KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy,kuryrnetwork
+ KURYR_ENABLED_HANDLERS: vif,endpoints,service,namespace,pod_label,policy,kuryrnetworkpolicy,kuryrnetwork,kuryrport,kuryrloadbalancer
KURYR_SG_DRIVER: policy
KURYR_SUBNET_DRIVER: namespace
KURYR_K8S_CONTAINERIZED_DEPLOYMENT: true
diff --git a/cni.Dockerfile b/cni.Dockerfile
index 803cd58b1..b4e547288 100644
--- a/cni.Dockerfile
+++ b/cni.Dockerfile
@@ -18,7 +18,8 @@ RUN yum upgrade -y \
COPY . /opt/kuryr-kubernetes
-RUN pip3 install -c $UPPER_CONSTRAINTS_FILE /opt/kuryr-kubernetes \
+RUN pip3 install -U pip \
+ && python3 -m pip install -c $UPPER_CONSTRAINTS_FILE /opt/kuryr-kubernetes \
&& cp /opt/kuryr-kubernetes/cni_ds_init /usr/bin/cni_ds_init \
&& mkdir -p /etc/kuryr-cni \
&& cp /opt/kuryr-kubernetes/etc/cni/net.d/* /etc/kuryr-cni \
diff --git a/controller.Dockerfile b/controller.Dockerfile
index a7093fbbc..662eac3c4 100644
--- a/controller.Dockerfile
+++ b/controller.Dockerfile
@@ -10,7 +10,8 @@ RUN yum upgrade -y \
COPY . /opt/kuryr-kubernetes
-RUN pip3 install -c $UPPER_CONSTRAINTS_FILE --no-cache-dir /opt/kuryr-kubernetes \
+RUN pip3 install -U pip \
+ && python3 -m pip install -c $UPPER_CONSTRAINTS_FILE --no-cache-dir /opt/kuryr-kubernetes \
&& yum -y history undo last \
&& yum clean all \
&& rm -rf /opt/kuryr-kubernetes \
diff --git a/devstack/lib/kuryr_kubernetes b/devstack/lib/kuryr_kubernetes
index 731017d1d..8f20b65d8 100644
--- a/devstack/lib/kuryr_kubernetes
+++ b/devstack/lib/kuryr_kubernetes
@@ -452,7 +452,9 @@ rules:
- kuryrnets
- kuryrnetworks
- kuryrnetpolicies
+ - kuryrnetworkpolicies
- kuryrloadbalancers
+ - kuryrports
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
diff --git a/devstack/local.conf.odl.sample b/devstack/local.conf.odl.sample
index 243464887..581c7f2b2 100644
--- a/devstack/local.conf.odl.sample
+++ b/devstack/local.conf.odl.sample
@@ -27,10 +27,11 @@ IDENTITY_API_VERSION=3
ENABLED_SERVICES=""
# Neutron services
-enable_service neutron
+enable_plugin neutron https://opendev.org/openstack/neutron
enable_service q-dhcp
-enable_service q-svc
+enable_service q-api
enable_service q-meta
+enable_service q-svc
# LBaaSv2 service and Haproxy agent
enable_plugin neutron-lbaas \
diff --git a/devstack/local.conf.openshift.sample b/devstack/local.conf.openshift.sample
index 04d918bb9..d9a0f7eae 100644
--- a/devstack/local.conf.openshift.sample
+++ b/devstack/local.conf.openshift.sample
@@ -27,12 +27,13 @@ IDENTITY_API_VERSION=3
ENABLED_SERVICES=""
# Neutron services
-enable_service neutron
+enable_plugin neutron https://opendev.org/openstack/neutron
enable_service q-agt
enable_service q-dhcp
enable_service q-l3
-enable_service q-svc
+enable_service q-api
enable_service q-meta
+enable_service q-svc
# OCTAVIA
# Uncomment it to use L2 communication between loadbalancer and member pods
diff --git a/devstack/local.conf.pod-in-vm.undercloud.odl.sample b/devstack/local.conf.pod-in-vm.undercloud.odl.sample
index 7f7f009f2..85435f164 100644
--- a/devstack/local.conf.pod-in-vm.undercloud.odl.sample
+++ b/devstack/local.conf.pod-in-vm.undercloud.odl.sample
@@ -24,7 +24,7 @@ IDENTITY_API_VERSION=3
ENABLED_SERVICES=""
# Neutron services
-enable_service neutron
+enable_plugin neutron https://opendev.org/openstack/neutron
enable_service q-dhcp
enable_service q-svc
enable_service q-meta
diff --git a/devstack/local.conf.sample b/devstack/local.conf.sample
index 0d07a6601..f6744105f 100644
--- a/devstack/local.conf.sample
+++ b/devstack/local.conf.sample
@@ -27,12 +27,13 @@ IDENTITY_API_VERSION=3
ENABLED_SERVICES=""
# Neutron services
-enable_service neutron
+enable_plugin neutron https://opendev.org/openstack/neutron
enable_service q-agt
enable_service q-dhcp
enable_service q-l3
-enable_service q-svc
+enable_service q-api
enable_service q-meta
+enable_service q-svc
# VAR RUN PATH
# =============
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index 1fd4c4263..2fce77cd9 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -973,6 +973,9 @@ function update_tempest_conf_file {
fi
iniset $TEMPEST_CONFIG kuryr_kubernetes validate_crd True
iniset $TEMPEST_CONFIG kuryr_kubernetes kuryrnetworks True
+ iniset $TEMPEST_CONFIG kuryr_kubernetes kuryrports True
+ iniset $TEMPEST_CONFIG kuryr_kubernetes kuryrloadbalancers True
+ iniset $TEMPEST_CONFIG kuryr_kubernetes new_kuryrnetworkpolicy_crd True
}
source $DEST/kuryr-kubernetes/devstack/lib/kuryr_kubernetes
diff --git a/devstack/settings b/devstack/settings
index edd61626f..175f1bd79 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -43,7 +43,7 @@ KURYR_K8S_API_LB_PORT=${KURYR_K8S_API_LB_PORT:-443}
KURYR_PORT_DEBUG=${KURYR_PORT_DEBUG:-True}
KURYR_SUBNET_DRIVER=${KURYR_SUBNET_DRIVER:-default}
KURYR_SG_DRIVER=${KURYR_SG_DRIVER:-default}
-KURYR_ENABLED_HANDLERS=${KURYR_ENABLED_HANDLERS:-vif,lb,lbaasspec}
+KURYR_ENABLED_HANDLERS=${KURYR_ENABLED_HANDLERS:-vif,endpoints,service,kuryrloadbalancer,kuryrport}
# OpenShift
OPENSHIFT_BINARY_VERSION=${OPENSHIFT_BINARY_VERSION:-v3.11.0}
diff --git a/doc/source/devref/network_policy.rst b/doc/source/devref/network_policy.rst
index 8f4cd2564..cd43036c7 100644
--- a/doc/source/devref/network_policy.rst
+++ b/doc/source/devref/network_policy.rst
@@ -47,22 +47,22 @@ The network policy CRD has the following format:
.. code-block:: yaml
apiVersion: openstack.org/v1
- kind: KuryrNetPolicy
+ kind: KuryrNetworkPolicy
metadata:
...
spec:
egressSgRules:
- - security_group_rule:
+ - sgRule:
...
ingressSgRules:
- - security_group_rule:
- ...
- networkpolicy_spec:
+ - sgRule:
...
podSelector:
...
+ status:
securityGroupId: ...
- securityGroupName: ...
+ podSelector: ...
+ securityGroupRules: ...
A new handler has been added to react to Network Policy events, and the existing
ones, for instance service/pod handlers, have been modified to account for the
@@ -201,26 +201,25 @@ are assumed to affect Ingress.
.. code-block:: yaml
apiVersion: openstack.org/v1
- kind: KuryrNetPolicy
+ kind: KuryrNetworkPolicy
metadata:
- name: np-default-deny
+ name: default-deny
namespace: default
...
spec:
egressSgRules:
- - security_group_rule:
+ - sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: egress
ethertype: IPv4
- id: 60a0d59c-2102-43e0-b025-75c98b7d9315
security_group_id: 20d9b623-f1e0-449d-95c1-01624cb3e315
ingressSgRules: []
- networkpolicy_spec:
- ...
podSelector:
...
+ status:
securityGroupId: 20d9b623-f1e0-449d-95c1-01624cb3e315
- securityGroupName: sg-default-deny
+ securityGroupRules: ...
+ podSelector: ...
Allow traffic from pod
@@ -263,37 +262,33 @@ restriction was enforced.
.. code-block:: yaml
apiVersion: openstack.org/v1
- kind: KuryrNetPolicy
+ kind: KuryrNetworkPolicy
metadata:
- name: np-allow-monitoring-via-pod-selector
+ name: allow-monitoring-via-pod-selector
namespace: default
...
spec:
egressSgRules:
- - security_group_rule:
+ - sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: egress
ethertype: IPv4
- id: 203a14fe-1059-4eff-93ed-a42bd957145d
- security_group_id: 7f0ef8c2-4846-4d8c-952f-94a9098fff17
ingressSgRules:
- namespace: default
- security_group_rule:
+ sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: ingress
ethertype: IPv4
- id: 7987c382-f2a9-47f7-b6e8-1a3a1bcb7d95
port_range_max: 8080
port_range_min: 8080
protocol: tcp
remote_ip_prefix: 10.0.1.143
- security_group_id: 7f0ef8c2-4846-4d8c-952f-94a9098fff17
- networkpolicy_spec:
- ...
podSelector:
...
+ status:
securityGroupId: 7f0ef8c2-4846-4d8c-952f-94a9098fff17
- securityGroupName: sg-allow-monitoring-via-pod-selector
+ securityGroupRules: ...
+ podSelector: ...
Allow traffic from namespace
@@ -337,36 +332,32 @@ egress rule allowing traffic to everywhere.
.. code-block:: yaml
apiVersion: openstack.org/v1
- kind: KuryrNetPolicy
- name: np-allow-test-via-ns-selector
+ kind: KuryrNetworkPolicy
+ name: allow-test-via-ns-selector
namespace: default
...
spec:
egressSgRules:
- - security_group_rule:
+ - sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: egress
ethertype: IPv4
- id: 8c21bf42-c8b9-4628-b0a1-bd0dbb192e6b
- security_group_id: c480327c-2db4-4eb6-af1e-eeb0ce9b46c9
ingressSgRules:
- namespace: dev
- security_group_rule:
+ sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: ingress
ethertype: IPv4
- id: 2a33b802-56ad-430a-801d-690f653198ef
port_range_max: 8080
port_range_min: 8080
protocol: tcp
remote_ip_prefix: 10.0.1.192/26
- security_group_id: c480327c-2db4-4eb6-af1e-eeb0ce9b46c9
- networkpolicy_spec:
- ...
podSelector:
...
+ status:
securityGroupId: c480327c-2db4-4eb6-af1e-eeb0ce9b46c9
- securityGroupName: sg-allow-test-via-ns-selector
+ securityGroupRules: ...
+ podSelector: ...
.. note::
diff --git a/doc/source/installation/manual.rst b/doc/source/installation/manual.rst
index 2a4823f5f..c3992e36b 100644
--- a/doc/source/installation/manual.rst
+++ b/doc/source/installation/manual.rst
@@ -95,6 +95,7 @@ Edit ``kuryr.conf``:
- kuryrnets
- kuryrnetworks
- kuryrnetpolicies
+ - kuryrnetworkpolicies
- kuryrloadbalancers
- apiGroups: ["networking.k8s.io"]
resources:
diff --git a/doc/source/installation/network_policy.rst b/doc/source/installation/network_policy.rst
index 6917c9e29..bdb8de446 100644
--- a/doc/source/installation/network_policy.rst
+++ b/doc/source/installation/network_policy.rst
@@ -10,7 +10,7 @@ be found at :doc:`./devstack/containerized`):
.. code-block:: ini
[kubernetes]
- enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetwork,kuryrnetpolicy
+ enabled_handlers=vif,endpoints,service,policy,pod_label,namespace,kuryrnetwork,kuryrnetworkpolicy,kuryrport,kuryrloadbalancer
Note that if you also want to enable prepopulation of ports pools upon new
namespace creation, you also need to add the kuryrnetwork_population handler
@@ -19,7 +19,7 @@ namespace creation, you also need to add the kuryrnetwork_population handler
.. code-block:: ini
[kubernetes]
- enabled_handlers=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetpolicy,kuryrnetwork,kuryrnetwork_population
+ enabled_handlers=vif,endpoints,service,policy,pod_label,namespace,kuryrnetworkpolicy,kuryrnetwork,kuryrnetwork_population,kuryrport,kuryrloadbalancer
After that, enable also the security group drivers for policies:
@@ -82,7 +82,7 @@ to add the policy, pod_label and namespace handler and drivers with:
.. code-block:: bash
- KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,policy,pod_label,namespace,kuryrnetpolicy
+ KURYR_ENABLED_HANDLERS=vif,endpoints,service,policy,pod_label,namespace,kuryrnetworkpolicy,kuryrport,kuryrloadbalancer
KURYR_SG_DRIVER=policy
KURYR_SUBNET_DRIVER=namespace
@@ -143,9 +143,9 @@ Testing the network policy support functionality
.. code-block:: console
- $ kubectl get kuryrnetpolicies
+ $ kubectl get kuryrnetworkpolicies
NAME AGE
- np-test-network-policy 2s
+ test-network-policy 2s
$ kubectl get networkpolicies
NAME POD-SELECTOR AGE
@@ -158,69 +158,42 @@ Testing the network policy support functionality
.. code-block:: console
- $ kubectl get kuryrnetpolicy np-test-network-policy -o yaml
+ $ kubectl get kuryrnetworkpolicy test-network-policy -o yaml
apiVersion: openstack.org/v1
- kind: KuryrNetPolicy
+ kind: KuryrNetworkPolicy
metadata:
annotations:
- networkpolicy_name: test-network-policy
- networkpolicy_namespace: default
- networkpolicy_uid: aee1c59f-c634-11e8-b63d-002564fdd760
+ networkPolicyLink:
clusterName: ""
creationTimestamp: 2018-10-02T11:17:02Z
generation: 0
- name: np-test-network-policy
+ name: test-network-policy
namespace: default
resourceVersion: "2117"
- selfLink: /apis/openstack.org/v1/namespaces/default/kuryrnetpolicies/np-test-network-policy
+ selfLink: /apis/openstack.org/v1/namespaces/default/kuryrnetworkpolicies/test-network-policy
uid: afb99326-c634-11e8-b63d-002564fdd760
spec:
egressSgRules:
- - security_group_rule:
+ - sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: egress
ethertype: IPv4
- id: 6297c198-b385-44f3-8b43-29951f933a8f
port_range_max: 5978
port_range_min: 5978
protocol: tcp
- security_group_id: cdee7815-3b49-4a3e-abc8-31e384ab75c5
ingressSgRules:
- - security_group_rule:
+ - sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: ingress
ethertype: IPv4
- id: f4e11e73-81c6-4c1b-9760-714eedff417b
port_range_max: 6379
port_range_min: 6379
protocol: tcp
- security_group_id: cdee7815-3b49-4a3e-abc8-31e384ab75c5
+ status:
securityGroupId: cdee7815-3b49-4a3e-abc8-31e384ab75c5
- securityGroupName: sg-test-network-policy
- networkpolicy_spec:
- egress:
- - to:
- - namespaceSelector:
- matchLabels:
- project: default
- ports:
- - port: 5978
- protocol: TCP
- ingress:
- - from:
- - namespaceSelector:
- matchLabels:
- project: default
- ports:
- - port: 6379
- protocol: TCP
- podSelector:
- matchLabels:
- project: default
- policyTypes:
- - Ingress
- - Egress
+ securityGroupRules:
+ …
$ openstack security group rule list sg-test-network-policy --protocol tcp -c "IP Protocol" -c "Port Range" -c "Direction" --long
+-------------+------------+-----------+
@@ -273,67 +246,41 @@ Testing the network policy support functionality
$ kubectl patch networkpolicy test-network-policy -p '{"spec":{"ingress":[{"ports":[{"port": 8080,"protocol": "TCP"}]}]}}'
networkpolicy "test-network-policy" patched
- $ kubectl get knp np-test-network-policy -o yaml
+ $ kubectl get knp test-network-policy -o yaml
apiVersion: openstack.org/v1
- kind: KuryrNetPolicy
+ kind: KuryrNetworkPolicy
metadata:
annotations:
- networkpolicy_name: test-network-policy
- networkpolicy_namespace: default
- networkpolicy_uid: aee1c59f-c634-11e8-b63d-002564fdd760
+ networkPolicyLink:
clusterName: ""
creationTimestamp: 2018-10-02T11:17:02Z
generation: 0
- name: np-test-network-policy
+ name: test-network-policy
namespace: default
resourceVersion: "1546"
- selfLink: /apis/openstack.org/v1/namespaces/default/kuryrnetpolicies/np-test-network-policy
+ selfLink: /apis/openstack.org/v1/namespaces/default/kuryrnetworkpolicies/test-network-policy
uid: afb99326-c634-11e8-b63d-002564fdd760
spec:
egressSgRules:
- - security_group_rule:
+ - sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: egress
ethertype: IPv4
- id: 1969a0b3-55e1-43d7-ba16-005b4ed4cbb7
port_range_max: 5978
port_range_min: 5978
protocol: tcp
- security_group_id: cdee7815-3b49-4a3e-abc8-31e384ab75c5
ingressSgRules:
- - security_group_rule:
+ - sgRule:
description: Kuryr-Kubernetes NetPolicy SG rule
direction: ingress
ethertype: IPv4
- id: 6598aa1f-4f94-4fb2-81ce-d3649ba28f33
port_range_max: 8080
port_range_min: 8080
protocol: tcp
- security_group_id: cdee7815-3b49-4a3e-abc8-31e384ab75c5
+ status:
securityGroupId: cdee7815-3b49-4a3e-abc8-31e384ab75c5
- networkpolicy_spec:
- egress:
- - ports:
- - port: 5978
- protocol: TCP
- to:
- - namespaceSelector:
- matchLabels:
- project: default
- ingress:
- - ports:
- - port: 8080
- protocol: TCP
- from:
- - namespaceSelector:
- matchLabels:
- project: default
- podSelector:
- matchLabels:
- project: default
- policyTypes:
- - Ingress
- - Egress
+ securityGroupRules:
+ …
$ openstack security group rule list sg-test-network-policy -c "IP Protocol" -c "Port Range" -c "Direction" --long
+-------------+------------+-----------+
@@ -388,6 +335,6 @@ Testing the network policy support functionality
.. code-block:: console
$ kubectl delete -f network_policy.yml
- $ kubectl get kuryrnetpolicies
+ $ kubectl get kuryrnetworkpolicies
$ kubectl get networkpolicies
$ openstack security group list | grep sg-test-network-policy
diff --git a/kubernetes_crds/kuryr_crds/kuryrloadbalancer.yaml b/kubernetes_crds/kuryr_crds/kuryrloadbalancer.yaml
index eb67591b0..0e54a4705 100644
--- a/kubernetes_crds/kuryr_crds/kuryrloadbalancer.yaml
+++ b/kubernetes_crds/kuryr_crds/kuryrloadbalancer.yaml
@@ -29,13 +29,6 @@ spec:
properties:
spec:
type: object
- required:
- - ip
- - ports
- - project_id
- - security_groups_ids
- - subnet_id
- - type
properties:
ip:
type: string
@@ -46,7 +39,6 @@ spec:
items:
type: object
required:
- - name
- port
- protocol
- targetPort
@@ -69,13 +61,50 @@ spec:
type: string
type:
type: string
+ subsets:
+ type: array
+ items:
+ type: object
+ properties:
+ addresses:
+ type: array
+ items:
+ type: object
+ properties:
+ hostname:
+ type: string
+ ip:
+ type: string
+ nodeName:
+ type: string
+ targetRef:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ resourceVersion:
+ type: string
+ uid:
+ type: string
+ ports:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ port:
+ type: integer
+ protocol:
+ type: string
status:
type: object
- required:
- - listeners
- - loadbalancer
- - members
- - pools
properties:
listeners:
type: array
diff --git a/kubernetes_crds/kuryr_crds/kuryrnetpolicy.yaml b/kubernetes_crds/kuryr_crds/kuryrnetpolicy.yaml
index dc0a7e07b..ccf3f942a 100644
--- a/kubernetes_crds/kuryr_crds/kuryrnetpolicy.yaml
+++ b/kubernetes_crds/kuryr_crds/kuryrnetpolicy.yaml
@@ -9,8 +9,6 @@ spec:
plural: kuryrnetpolicies
singular: kuryrnetpolicy
kind: KuryrNetPolicy
- shortNames:
- - knp
versions:
- name: v1
served: true
diff --git a/kubernetes_crds/kuryr_crds/kuryrnetworkpolicy.yaml b/kubernetes_crds/kuryr_crds/kuryrnetworkpolicy.yaml
new file mode 100644
index 000000000..3726409d8
--- /dev/null
+++ b/kubernetes_crds/kuryr_crds/kuryrnetworkpolicy.yaml
@@ -0,0 +1,158 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: kuryrnetworkpolicies.openstack.org
+spec:
+ group: openstack.org
+ scope: Namespaced
+ names:
+ plural: kuryrnetworkpolicies
+ singular: kuryrnetworkpolicy
+ kind: KuryrNetworkPolicy
+ shortNames:
+ - knp
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: SG-ID
+ type: string
+ description: The ID of the SG associated to the policy
+ jsonPath: .status.securityGroupId
+ - name: Age
+ type: date
+ jsonPath: .metadata.creationTimestamp
+ schema:
+ openAPIV3Schema:
+ type: object
+ required:
+ - status
+ - spec
+ properties:
+ spec:
+ type: object
+ required:
+ - egressSgRules
+ - ingressSgRules
+ - podSelector
+ - policyTypes
+ properties:
+ egressSgRules:
+ type: array
+ items:
+ type: object
+ required:
+ - sgRule
+ properties:
+ affectedPods:
+ type: array
+ items:
+ type: object
+ properties:
+ podIP:
+ type: string
+ podNamespace:
+ type: string
+ required:
+ - podIP
+ - podNamespace
+ namespace:
+ type: string
+ sgRule:
+ type: object
+ properties:
+ description:
+ type: string
+ direction:
+ type: string
+ ethertype:
+ type: string
+ port_range_max:
+ type: integer
+ port_range_min:
+ type: integer
+ protocol:
+ type: string
+ remote_ip_prefix:
+ type: string
+ ingressSgRules:
+ type: array
+ items:
+ type: object
+ required:
+ - sgRule
+ properties:
+ affectedPods:
+ type: array
+ items:
+ type: object
+ properties:
+ podIP:
+ type: string
+ podNamespace:
+ type: string
+ required:
+ - podIP
+ - podNamespace
+ namespace:
+ type: string
+ sgRule:
+ type: object
+ properties:
+ description:
+ type: string
+ direction:
+ type: string
+ ethertype:
+ type: string
+ port_range_max:
+ type: integer
+ port_range_min:
+ type: integer
+ protocol:
+ type: string
+ remote_ip_prefix:
+ type: string
+ podSelector:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ policyTypes:
+ type: array
+ items:
+ type: string
+ status:
+ type: object
+ required:
+ - securityGroupRules
+ properties:
+ securityGroupId:
+ type: string
+ securityGroupRules:
+ type: array
+ items:
+ type: object
+ required:
+ - id
+ properties:
+ id:
+ type: string
+ description:
+ type: string
+ direction:
+ type: string
+ ethertype:
+ type: string
+ port_range_max:
+ type: integer
+ port_range_min:
+ type: integer
+ protocol:
+ type: string
+ remote_ip_prefix:
+ type: string
+ security_group_id:
+ type: string
+ podSelector:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
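For reference, a minimal object satisfying this schema could look as follows (a sketch built only from the fields above; the rule contents are illustrative):

.. code-block:: python

    # Minimal KuryrNetworkPolicy primitive matching the new CRD schema.
    # SG rules live under spec.*SgRules[].sgRule; realized rules (with
    # their Neutron IDs) end up in status.securityGroupRules.
    knp = {
        'apiVersion': 'openstack.org/v1',
        'kind': 'KuryrNetworkPolicy',
        'metadata': {'name': 'default-deny', 'namespace': 'default'},
        'spec': {
            'egressSgRules': [{'sgRule': {
                'description': 'Kuryr-Kubernetes NetPolicy SG rule',
                'direction': 'egress',
                'ethertype': 'IPv4',
            }}],
            'ingressSgRules': [],
            'podSelector': {'matchLabels': {'project': 'default'}},
            'policyTypes': ['Ingress', 'Egress'],
        },
        'status': {
            'securityGroupRules': [],  # required; filled by the controller
        },
    }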
diff --git a/kubernetes_crds/kuryr_crds/kuryrport.yaml b/kubernetes_crds/kuryr_crds/kuryrport.yaml
new file mode 100644
index 000000000..21767300c
--- /dev/null
+++ b/kubernetes_crds/kuryr_crds/kuryrport.yaml
@@ -0,0 +1,48 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: kuryrports.openstack.org
+spec:
+ group: openstack.org
+ scope: Namespaced
+ names:
+ plural: kuryrports
+ singular: kuryrport
+ kind: KuryrPort
+ shortNames:
+ - kp
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ spec:
+ type: object
+ required:
+ - podUid
+ - podNodeName
+ - vifs
+ properties:
+ podUid:
+ type: string
+ podNodeName:
+ type: string
+ vifs:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ additionalPrinterColumns:
+ - name: PodUID
+ type: string
+ description: Pod UID
+ jsonPath: .spec.podUid
+ - name: Nodename
+ type: string
+ description: Name of the node the corresponding pod lives on
+ jsonPath: .spec.podNodeName
+ - name: labels
+ type: string
+ description: Labels for the CRD
+ jsonPath: .metadata.labels
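A minimal KuryrPort could then look like the sketch below. The ``vifs`` field is schema-free (``x-kubernetes-preserve-unknown-fields``); elsewhere in this change it holds a mapping of interface name to ``{'default': bool, 'vif': <os_vif primitive>}``. The ``kuryr.openstack.org/nodeName`` label matches ``KURYRPORT_LABEL`` from ``constants.py`` and is what the CNI daemon's watcher selects on (all concrete values are illustrative):

.. code-block:: python

    kp = {
        'apiVersion': 'openstack.org/v1',
        'kind': 'KuryrPort',
        'metadata': {
            'name': 'my-pod',
            'namespace': 'default',
            # Lets the CNI daemon watch only this node's KuryrPorts.
            'labels': {'kuryr.openstack.org/nodeName': 'worker-0'},
        },
        'spec': {
            'podUid': 'afb99326-c634-11e8-b63d-002564fdd760',
            'podNodeName': 'worker-0',
            'vifs': {'eth0': {'default': True, 'vif': {}}},
        },
    }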
diff --git a/kuryr_kubernetes/cni/binding/dpdk.py b/kuryr_kubernetes/cni/binding/dpdk.py
index 6a7b2695a..4685582ba 100644
--- a/kuryr_kubernetes/cni/binding/dpdk.py
+++ b/kuryr_kubernetes/cni/binding/dpdk.py
@@ -15,6 +15,7 @@
import os
+from os_vif import objects
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
@@ -23,7 +24,6 @@
from kuryr_kubernetes.cni.binding import base as b_base
from kuryr_kubernetes import constants
from kuryr_kubernetes.handlers import health
-from kuryr_kubernetes import utils
from kuryr.lib._i18n import _
@@ -143,42 +143,46 @@ def _remove_pci_file(self, container_id, ifname):
def _set_vif(self, vif):
# TODO(ivc): extract annotation interactions
- state, labels, resource_version = self._get_pod_details(
+ vifs, labels, resource_version, kp_link = self._get_pod_details(
vif.port_profile.selflink)
- for ifname, vif_ex in state.vifs.items():
- if vif.id == vif_ex.id:
- state.vifs[ifname] = vif
+ for ifname, data in vifs.items():
+ if vif.id == data['vif'].id:
+ vifs[ifname]['vif'] = vif
break
- self._set_pod_details(state, vif.port_profile.selflink, labels,
- resource_version)
+ self._set_pod_details(vifs, vif.port_profile.selflink, labels,
+ resource_version, kp_link)
def _get_pod_details(self, selflink):
k8s = clients.get_kubernetes_client()
pod = k8s.get(selflink)
- annotations = pod['metadata']['annotations']
- resource_version = pod['metadata']['resourceVersion']
- labels = pod['metadata'].get('labels')
+ kp = k8s.get(f'{constants.K8S_API_CRD_NAMESPACES}/'
+ f'{pod["metadata"]["namespace"]}/kuryrports/'
+ f'{pod["metadata"]["name"]}')
+
try:
- annotations = annotations[constants.K8S_ANNOTATION_VIF]
- state_annotation = jsonutils.loads(annotations)
- state = utils.extract_pod_annotation(state_annotation)
- except KeyError:
- LOG.exception("No annotations %s", constants.K8S_ANNOTATION_VIF)
+ vifs = {k: {'default': v['default'],
+ 'vif': objects.base.VersionedObject
+ .obj_from_primitive(v['vif'])}
+ for k, v in kp['spec']['vifs'].items()}
+ except (KeyError, AttributeError):
+ LOG.exception(f"No vifs found on KuryrPort: {kp}")
raise
- except ValueError:
- LOG.exception("Unable encode annotations")
- raise
- LOG.info("Got VIFs from annotation: %s", state.vifs)
- return state, labels, resource_version
+ LOG.info(f"Got VIFs from Kuryrport: {vifs}")
- def _set_pod_details(self, state, selflink, labels, resource_version):
- if not state:
- LOG.info("Removing VIFs annotation: %r", state)
- annotation = None
- else:
- state_dict = state.obj_to_primitive()
- annotation = jsonutils.dumps(state_dict, sort_keys=True)
- LOG.info("Setting VIFs annotation: %r", annotation)
+ resource_version = pod['metadata']['resourceVersion']
+ labels = pod['metadata'].get('labels')
+ return vifs, labels, resource_version, kp['metadata']['selfLink']
+
+ def _set_pod_details(self, vifs, selflink, labels, resource_version,
+ kp_link):
+ k8s = clients.get_kubernetes_client()
+ if vifs:
+ spec = {k: {'default': v['default'],
+ 'vif': v['vif'].obj_to_primitive()}
+ for k, v in vifs.items()}
+
+ LOG.info("Setting VIFs in KuryrPort %r", spec)
+ k8s.patch_crd('spec', kp_link, {'vifs': spec})
if not labels:
LOG.info("Removing Label annotation: %r", labels)
@@ -187,8 +191,6 @@ def _set_pod_details(self, state, selflink, labels, resource_version):
labels_annotation = jsonutils.dumps(labels, sort_keys=True)
LOG.info("Setting Labels annotation: %r", labels_annotation)
- k8s = clients.get_kubernetes_client()
k8s.annotate(selflink,
- {constants.K8S_ANNOTATION_VIF: annotation,
- constants.K8S_ANNOTATION_LABEL: labels_annotation},
+ {constants.K8S_ANNOTATION_LABEL: labels_annotation},
resource_version=resource_version)
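The round-trip used here relies on the standard oslo.versionedobjects serialization that os_vif provides: ``obj_to_primitive()`` when writing VIFs into the KuryrPort spec, ``obj_from_primitive()`` when reading them back. A self-contained sketch (the VIF id is illustrative):

.. code-block:: python

    from os_vif import objects

    objects.register_all()  # register VIF classes for deserialization

    vif = objects.vif.VIFOpenVSwitch(
        id='afb99326-c634-11e8-b63d-002564fdd760')
    primitive = vif.obj_to_primitive()  # JSON-safe dict, stored in the CRD
    restored = objects.base.VersionedObject.obj_from_primitive(primitive)
    assert restored.id == vif.id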
diff --git a/kuryr_kubernetes/cni/daemon/service.py b/kuryr_kubernetes/cni/daemon/service.py
index 8f50a7a55..47491cf5d 100644
--- a/kuryr_kubernetes/cni/daemon/service.py
+++ b/kuryr_kubernetes/cni/daemon/service.py
@@ -20,14 +20,14 @@
import sys
import threading
import time
+import urllib.parse
+import urllib3
import cotyledon
import flask
from pyroute2.ipdb import transactional
-import urllib3
import os_vif
-from os_vif.objects import base
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
@@ -212,10 +212,12 @@ def run(self):
self.pipeline.register(h_cni.CallbackHandler(self.on_done,
self.on_deleted))
self.watcher = k_watcher.Watcher(self.pipeline)
- self.watcher.add(
- "%(base)s/pods?fieldSelector=spec.nodeName=%(node_name)s" % {
- 'base': k_const.K8S_API_BASE,
- 'node_name': self._get_nodename()})
+ query_label = urllib.parse.quote_plus(f'{k_const.KURYRPORT_LABEL}='
+ f'{self._get_nodename()}')
+
+ self.watcher.add(f'{k_const.K8S_API_CRD_KURYRPORTS}'
+ f'?labelSelector={query_label}')
+
self.is_running = True
self.health_thread = threading.Thread(
target=self._start_watcher_health_checker)
@@ -230,55 +232,43 @@ def _start_watcher_health_checker(self):
self.healthy.value = False
time.sleep(HEALTH_CHECKER_DELAY)
- def on_done(self, pod, vifs):
- pod_name = utils.get_pod_unique_name(pod)
- vif_dict = {
- ifname: vif.obj_to_primitive() for
- ifname, vif in vifs.items()
- }
- # NOTE(dulek): We need a lock when modifying shared self.registry dict
- # to prevent race conditions with other processes/threads.
- with lockutils.lock(pod_name, external=True):
- if (pod_name not in self.registry or
- self.registry[pod_name]['pod']['metadata']['uid']
- != pod['metadata']['uid']):
- self.registry[pod_name] = {'pod': pod, 'vifs': vif_dict,
- 'containerid': None,
- 'vif_unplugged': False,
- 'del_received': False}
+ def on_done(self, kuryrport, vifs):
+ kp_name = utils.get_res_unique_name(kuryrport)
+ with lockutils.lock(kp_name, external=True):
+ if (kp_name not in self.registry or
+ self.registry[kp_name]['kp']['metadata']['uid']
+ != kuryrport['metadata']['uid']):
+ self.registry[kp_name] = {'kp': kuryrport,
+ 'vifs': vifs,
+ 'containerid': None,
+ 'vif_unplugged': False,
+ 'del_received': False}
else:
- # NOTE(dulek): Only update vif if its status changed, we don't
- # need to care about other changes now.
- old_vifs = {
- ifname:
- base.VersionedObject.obj_from_primitive(vif_obj) for
- ifname, vif_obj in (
- self.registry[pod_name]['vifs'].items())
- }
+ old_vifs = self.registry[kp_name]['vifs']
for iface in vifs:
if old_vifs[iface].active != vifs[iface].active:
- pod_dict = self.registry[pod_name]
- pod_dict['vifs'] = vif_dict
- self.registry[pod_name] = pod_dict
+ kp_dict = self.registry[kp_name]
+ kp_dict['vifs'] = vifs
+ self.registry[kp_name] = kp_dict
- def on_deleted(self, pod):
- pod_name = utils.get_pod_unique_name(pod)
+ def on_deleted(self, kp):
+ kp_name = utils.get_res_unique_name(kp)
try:
- if pod_name in self.registry:
+ if kp_name in self.registry:
# NOTE(ndesh): We need to lock here to avoid race condition
# with the deletion code for CNI DEL so that
# we delete the registry entry exactly once
- with lockutils.lock(pod_name, external=True):
- if self.registry[pod_name]['vif_unplugged']:
- del self.registry[pod_name]
+ with lockutils.lock(kp_name, external=True):
+ if self.registry[kp_name]['vif_unplugged']:
+ del self.registry[kp_name]
else:
- pod_dict = self.registry[pod_name]
- pod_dict['del_received'] = True
- self.registry[pod_name] = pod_dict
+ kp_dict = self.registry[kp_name]
+ kp_dict['del_received'] = True
+ self.registry[kp_name] = kp_dict
except KeyError:
# This means someone else removed it. It's odd but safe to ignore.
- LOG.debug('Pod %s entry already removed from registry while '
- 'handling DELETED event. Ignoring.', pod_name)
+ LOG.debug('KuryrPort %s entry already removed from registry while '
+ 'handling DELETED event. Ignoring.', kp_name)
pass
def terminate(self):
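With this change the CNI daemon no longer watches all pods scheduled to the node; it watches only KuryrPort CRDs carrying the node's name label. A sketch of the resulting watch path, using the constants introduced in this patch (node name illustrative):

.. code-block:: python

    import urllib.parse

    K8S_API_CRD_KURYRPORTS = '/apis/openstack.org/v1/kuryrports'
    KURYRPORT_LABEL = 'kuryr.openstack.org/nodeName'

    def kuryrport_watch_path(node_name):
        # Same construction as the Watcher registration above.
        query_label = urllib.parse.quote_plus(
            f'{KURYRPORT_LABEL}={node_name}')
        return f'{K8S_API_CRD_KURYRPORTS}?labelSelector={query_label}'

    # kuryrport_watch_path('worker-0') ==
    # '/apis/openstack.org/v1/kuryrports'
    # '?labelSelector=kuryr.openstack.org%2FnodeName%3Dworker-0'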
diff --git a/kuryr_kubernetes/cni/handlers.py b/kuryr_kubernetes/cni/handlers.py
index aded36895..222dabfc3 100644
--- a/kuryr_kubernetes/cni/handlers.py
+++ b/kuryr_kubernetes/cni/handlers.py
@@ -17,18 +17,20 @@
from os_vif import objects as obj_vif
from oslo_log import log as logging
-from oslo_serialization import jsonutils
+from kuryr_kubernetes import clients
from kuryr_kubernetes import constants as k_const
+from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.handlers import dispatch as k_dis
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes import utils
+
LOG = logging.getLogger(__name__)
class CNIHandlerBase(k8s_base.ResourceEventHandler, metaclass=abc.ABCMeta):
- OBJECT_KIND = k_const.K8S_OBJ_POD
+ OBJECT_KIND = k_const.K8S_OBJ_KURYRPORT
def __init__(self, cni, on_done):
self._cni = cni
@@ -59,16 +61,18 @@ def callback(self):
raise NotImplementedError()
def _get_vifs(self, pod):
- # TODO(ivc): same as VIFHandler._get_vif
+ k8s = clients.get_kubernetes_client()
try:
- annotations = pod['metadata']['annotations']
- state_annotation = annotations[k_const.K8S_ANNOTATION_VIF]
- except KeyError:
+ kuryrport_crd = k8s.get(f'{k_const.K8S_API_CRD_NAMESPACES}/'
+ f'{pod["metadata"]["namespace"]}/'
+ f'kuryrports/{pod["metadata"]["name"]}')
+ LOG.debug("Got CRD: %r", kuryrport_crd)
+ except k_exc.K8sClientException:
return {}
- state_annotation = jsonutils.loads(state_annotation)
- state = utils.extract_pod_annotation(state_annotation)
- vifs_dict = state.vifs
- LOG.debug("Got VIFs from annotation: %r", vifs_dict)
+
+ vifs_dict = utils.get_vifs_from_crd(kuryrport_crd)
+ LOG.debug("Got vifs: %r", vifs_dict)
+
return vifs_dict
def _get_inst(self, pod):
@@ -81,31 +85,32 @@ class CallbackHandler(CNIHandlerBase):
def __init__(self, on_vif, on_del=None):
super(CallbackHandler, self).__init__(None, on_vif)
self._del_callback = on_del
- self._pod = None
+ self._kuryrport = None
self._callback_vifs = None
- def should_callback(self, pod, vifs):
+ def should_callback(self, kuryrport, vifs):
"""Called after all vifs have been processed
- Calls callback if there was at least one vif in the Pod
+ Calls callback if there was at least one vif in the CRD
- :param pod: dict containing Kubernetes Pod object
+ :param kuryrport: dict containing Kubernetes KuryrPort CRD object
:param vifs: dict containing os_vif VIF objects and ifnames
:returns True/False
"""
- self._pod = pod
+ self._kuryrport = kuryrport
self._callback_vifs = vifs
if vifs:
return True
return False
def callback(self):
- self._callback(self._pod, self._callback_vifs)
+ self._callback(self._kuryrport, self._callback_vifs)
- def on_deleted(self, pod):
- LOG.debug("Got pod %s deletion event.", pod['metadata']['name'])
+ def on_deleted(self, kuryrport):
+ LOG.debug("Got kuryrport %s deletion event.",
+ kuryrport['metadata']['name'])
if self._del_callback:
- self._del_callback(pod)
+ self._del_callback(kuryrport)
class CNIPipeline(k_dis.EventPipeline):
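``utils.get_vifs_from_crd()`` is referenced above but its body is not part of this diff. Judging by the analogous conversion in ``dpdk.py``, it presumably rebuilds os_vif objects from the primitives stored under ``spec.vifs``, roughly like this sketch (an assumption, not the actual helper):

.. code-block:: python

    from os_vif import objects

    def get_vifs_from_crd(kuryrport_crd):
        # Assumed spec.vifs layout, as written by the controller:
        # {ifname: {'default': bool, 'vif': <os_vif primitive>}}
        return {
            ifname: objects.base.VersionedObject.obj_from_primitive(
                data['vif'])
            for ifname, data in kuryrport_crd['spec']['vifs'].items()
        }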
diff --git a/kuryr_kubernetes/cni/plugins/k8s_cni_registry.py b/kuryr_kubernetes/cni/plugins/k8s_cni_registry.py
index 13bec826b..57dfbf8ab 100644
--- a/kuryr_kubernetes/cni/plugins/k8s_cni_registry.py
+++ b/kuryr_kubernetes/cni/plugins/k8s_cni_registry.py
@@ -15,7 +15,6 @@
import retrying
from os_vif import objects as obj_vif
-from os_vif.objects import base
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
@@ -31,12 +30,14 @@
CONF = cfg.CONF
RETRY_DELAY = 1000 # 1 second in milliseconds
-# TODO(dulek): Another corner case is (and was) when pod is deleted before it's
-# annotated by controller or even noticed by any watcher. Kubelet
-# will try to delete such vif, but we will have no data about it.
-# This is currently worked around by returning successfully in
-# case of timing out in delete. To solve this properly we need
-# to watch for pod deletes as well.
+# TODO(dulek, gryf): Another corner case is (and was) when pod is deleted
+# before it's corresponding CRD was created and populated by vifs by
+# controller or even noticed by any watcher. Kubelet will try to delete such
+# vif, but we will have no data about it. This is currently worked around by
+# returning successfully in case of timing out in delete. To solve this
+# properly we need to watch for pod deletes as well, or perhaps create
+ # a finalizer for the pod as soon as we know that the KuryrPort CRD will be
+# created.
class K8sCNIRegistryPlugin(base_cni.CNIPlugin):
@@ -45,32 +46,32 @@ def __init__(self, registry, healthy):
self.registry = registry
self.k8s = clients.get_kubernetes_client()
- def _get_pod_name(self, params):
+ def _get_obj_name(self, params):
return "%(namespace)s/%(name)s" % {
'namespace': params.args.K8S_POD_NAMESPACE,
'name': params.args.K8S_POD_NAME}
def add(self, params):
- pod_name = self._get_pod_name(params)
+ kp_name = self._get_obj_name(params)
timeout = CONF.cni_daemon.vif_annotation_timeout
- # Try to confirm if pod in the registry is not stale cache. If it is,
+ # Try to confirm if CRD in the registry is not stale cache. If it is,
# remove it.
- with lockutils.lock(pod_name, external=True):
- if pod_name in self.registry:
- cached_pod = self.registry[pod_name]['pod']
+ with lockutils.lock(kp_name, external=True):
+ if kp_name in self.registry:
+ cached_kp = self.registry[kp_name]['kp']
try:
- pod = self.k8s.get(cached_pod['metadata']['selfLink'])
+ kp = self.k8s.get(cached_kp['metadata']['selfLink'])
except Exception:
- LOG.exception('Error when getting pod %s', pod_name)
- raise exceptions.ResourceNotReady(pod_name)
+ LOG.exception('Error when getting KuryrPort %s', kp_name)
+ raise exceptions.ResourceNotReady(kp_name)
- if pod['metadata']['uid'] != cached_pod['metadata']['uid']:
- LOG.warning('Stale pod %s detected in cache. (API '
+ if kp['metadata']['uid'] != cached_kp['metadata']['uid']:
+ LOG.warning('Stale KuryrPort %s detected in cache. (API '
'uid=%s, cached uid=%s). Removing it from '
- 'cache.', pod_name, pod['metadata']['uid'],
- cached_pod['metadata']['uid'])
- del self.registry[pod_name]
+ 'cache.', kp_name, kp['metadata']['uid'],
+ cached_kp['metadata']['uid'])
+ del self.registry[kp_name]
vifs = self._do_work(params, b_base.connect, timeout)
@@ -78,70 +79,68 @@ def add(self, params):
# requests that we should ignore. We need a lock to
# prevent race conditions and replace whole object in the
# dict for multiprocessing.Manager to notice that.
- with lockutils.lock(pod_name, external=True):
- d = self.registry[pod_name]
+ with lockutils.lock(kp_name, external=True):
+ d = self.registry[kp_name]
d['containerid'] = params.CNI_CONTAINERID
- self.registry[pod_name] = d
- LOG.debug('Saved containerid = %s for pod %s',
- params.CNI_CONTAINERID, pod_name)
+ self.registry[kp_name] = d
+ LOG.debug('Saved containerid = %s for CRD %s',
+ params.CNI_CONTAINERID, kp_name)
# Wait for timeout sec, 1 sec between tries, retry when even one
# vif is not active.
@retrying.retry(stop_max_delay=timeout * 1000, wait_fixed=RETRY_DELAY,
retry_on_result=utils.any_vif_inactive)
- def wait_for_active(pod_name):
- return {
- ifname: base.VersionedObject.obj_from_primitive(vif_obj) for
- ifname, vif_obj in self.registry[pod_name]['vifs'].items()
- }
+ def wait_for_active(kp_name):
+ return self.registry[kp_name]['vifs']
- vifs = wait_for_active(pod_name)
+ vifs = wait_for_active(kp_name)
for vif in vifs.values():
if not vif.active:
LOG.error("Timed out waiting for vifs to become active")
- raise exceptions.ResourceNotReady(pod_name)
+ raise exceptions.ResourceNotReady(kp_name)
return vifs[k_const.DEFAULT_IFNAME]
def delete(self, params):
- pod_name = self._get_pod_name(params)
+ kp_name = self._get_obj_name(params)
try:
- reg_ci = self.registry[pod_name]['containerid']
- LOG.debug('Read containerid = %s for pod %s', reg_ci, pod_name)
+ reg_ci = self.registry[kp_name]['containerid']
+ LOG.debug('Read containerid = %s for KuryrPort %s', reg_ci,
+ kp_name)
if reg_ci and reg_ci != params.CNI_CONTAINERID:
# NOTE(dulek): This is a DEL request for some older (probably
# failed) ADD call. We should ignore it or we'll
# unplug a running pod.
LOG.warning('Received DEL request for unknown ADD call for '
- 'pod %s (CNI_CONTAINERID=%s). Ignoring.', pod_name,
- params.CNI_CONTAINERID)
+ 'KuryrPort %s (CNI_CONTAINERID=%s). Ignoring.',
+ kp_name, params.CNI_CONTAINERID)
return
except KeyError:
pass
# Passing arbitrary 5 seconds as timeout, as it does not make any sense
- # to wait on CNI DEL. If pod got deleted from API - VIF info is gone.
- # If pod got the annotation removed - it is now gone too. The number's
- # not 0, because we need to anticipate for restarts and delay before
- # registry is populated by watcher.
+ # to wait on CNI DEL. If kuryrport got deleted from API - VIF info is
+ # gone. If kuryrport got the vif info removed - it is now gone too.
+ # The number's not 0, because we need to anticipate for restarts and
+ # delay before registry is populated by watcher.
self._do_work(params, b_base.disconnect, 5)
# NOTE(ndesh): We need to lock here to avoid race condition
# with the deletion code in the watcher to ensure that
# we delete the registry entry exactly once
try:
- with lockutils.lock(pod_name, external=True):
- if self.registry[pod_name]['del_received']:
- del self.registry[pod_name]
+ with lockutils.lock(kp_name, external=True):
+ if self.registry[kp_name]['del_received']:
+ del self.registry[kp_name]
else:
- pod_dict = self.registry[pod_name]
- pod_dict['vif_unplugged'] = True
- self.registry[pod_name] = pod_dict
+ kp_dict = self.registry[kp_name]
+ kp_dict['vif_unplugged'] = True
+ self.registry[kp_name] = kp_dict
except KeyError:
- # This means the pod was removed before vif was unplugged. This
- # shouldn't happen, but we can't do anything about it now
- LOG.debug('Pod %s not found registry while handling DEL request. '
- 'Ignoring.', pod_name)
+ # This means the kuryrport was removed before vif was unplugged.
+ # This shouldn't happen, but we can't do anything about it now
+ LOG.debug('KuryrPort %s not found in registry while handling DEL '
+ 'request. Ignoring.', kp_name)
pass
def report_drivers_health(self, driver_healthy):
@@ -151,25 +150,22 @@ def report_drivers_health(self, driver_healthy):
self.healthy.value = driver_healthy
def _do_work(self, params, fn, timeout):
- pod_name = self._get_pod_name(params)
+ kp_name = self._get_obj_name(params)
# In case of KeyError retry for `timeout` s, wait 1 s between tries.
@retrying.retry(stop_max_delay=timeout * 1000, wait_fixed=RETRY_DELAY,
retry_on_exception=lambda e: isinstance(e, KeyError))
def find():
- return self.registry[pod_name]
+ return self.registry[kp_name]
try:
d = find()
- pod = d['pod']
- vifs = {
- ifname: base.VersionedObject.obj_from_primitive(vif_obj) for
- ifname, vif_obj in d['vifs'].items()
- }
+ kp = d['kp']
+ vifs = d['vifs']
except KeyError:
- LOG.error("Timed out waiting for requested pod to appear in "
+ LOG.error("Timed out waiting for requested KuryrPort to appear in "
"registry")
- raise exceptions.ResourceNotReady(pod_name)
+ raise exceptions.ResourceNotReady(kp_name)
for ifname, vif in vifs.items():
is_default_gateway = (ifname == k_const.DEFAULT_IFNAME)
@@ -178,12 +174,13 @@ def find():
# use the ifname supplied in the CNI ADD request
ifname = params.CNI_IFNAME
- fn(vif, self._get_inst(pod), ifname, params.CNI_NETNS,
+ fn(vif, self._get_inst(kp), ifname, params.CNI_NETNS,
report_health=self.report_drivers_health,
is_default_gateway=is_default_gateway,
container_id=params.CNI_CONTAINERID)
return vifs
- def _get_inst(self, pod):
- return obj_vif.instance_info.InstanceInfo(
- uuid=pod['metadata']['uid'], name=pod['metadata']['name'])
+ def _get_inst(self, kp):
+ return (obj_vif.instance_info
+ .InstanceInfo(uuid=kp['spec']['podUid'],
+ name=kp['metadata']['name']))
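After this refactor a registry entry is keyed by the KuryrPort's ``namespace/name`` and stores os_vif objects directly instead of their JSON primitives. A sketch of the entry shape, with the fields used by ``add()``, ``delete()``, ``on_done()`` and ``on_deleted()``:

.. code-block:: python

    # Shape of one self.registry entry (illustrative values).
    entry = {
        'kp': {},                # the KuryrPort dict from the watcher
        'vifs': {},              # ifname -> os_vif VIF object
        'containerid': None,     # set by add() after CNI ADD succeeds
        'vif_unplugged': False,  # set by delete() on CNI DEL
        'del_received': False,   # set by on_deleted() on watcher DELETED
    }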
diff --git a/kuryr_kubernetes/constants.py b/kuryr_kubernetes/constants.py
index c311d903d..3a1732530 100644
--- a/kuryr_kubernetes/constants.py
+++ b/kuryr_kubernetes/constants.py
@@ -13,15 +13,21 @@
# License for the specific language governing permissions and limitations
# under the License.
+KURYR_FQDN = 'kuryr.openstack.org'
+
K8S_API_BASE = '/api/v1'
K8S_API_NAMESPACES = K8S_API_BASE + '/namespaces'
-K8S_API_CRD = '/apis/openstack.org/v1'
+K8S_API_CRD_VERSION = 'openstack.org/v1'
+K8S_API_CRD = '/apis/' + K8S_API_CRD_VERSION
K8S_API_CRD_NAMESPACES = K8S_API_CRD + '/namespaces'
K8S_API_CRD_KURYRNETS = K8S_API_CRD + '/kuryrnets'
K8S_API_CRD_KURYRNETWORKS = K8S_API_CRD + '/kuryrnetworks'
K8S_API_CRD_KURYRNETPOLICIES = K8S_API_CRD + '/kuryrnetpolicies'
+K8S_API_CRD_KURYRNETWORKPOLICIES = K8S_API_CRD + '/kuryrnetworkpolicies'
K8S_API_CRD_KURYRLOADBALANCERS = K8S_API_CRD + '/kuryrloadbalancers'
+K8S_API_CRD_KURYRPORTS = K8S_API_CRD + '/kuryrports'
K8S_API_POLICIES = '/apis/networking.k8s.io/v1/networkpolicies'
+K8S_API_NETWORKING = '/apis/networking.k8s.io/v1'
K8S_API_NPWG_CRD = '/apis/k8s.cni.cncf.io/v1'
@@ -33,7 +39,9 @@
K8S_OBJ_KURYRNET = 'KuryrNet'
K8S_OBJ_KURYRNETWORK = 'KuryrNetwork'
K8S_OBJ_KURYRNETPOLICY = 'KuryrNetPolicy'
+K8S_OBJ_KURYRNETWORKPOLICY = 'KuryrNetworkPolicy'
K8S_OBJ_KURYRLOADBALANCER = 'KuryrLoadBalancer'
+K8S_OBJ_KURYRPORT = 'KuryrPort'
K8S_POD_STATUS_PENDING = 'Pending'
K8S_POD_STATUS_SUCCEEDED = 'Succeeded'
@@ -42,11 +50,13 @@
K8S_ANNOTATION_PREFIX = 'openstack.org/kuryr'
K8S_ANNOTATION_VIF = K8S_ANNOTATION_PREFIX + '-vif'
K8S_ANNOTATION_LABEL = K8S_ANNOTATION_PREFIX + '-pod-label'
+K8S_ANNOTATION_IP = K8S_ANNOTATION_PREFIX + '-pod-ip'
K8S_ANNOTATION_NAMESPACE_LABEL = K8S_ANNOTATION_PREFIX + '-namespace-label'
K8S_ANNOTATION_LBAAS_SPEC = K8S_ANNOTATION_PREFIX + '-lbaas-spec'
K8S_ANNOTATION_LBAAS_STATE = K8S_ANNOTATION_PREFIX + '-lbaas-state'
K8S_ANNOTATION_NET_CRD = K8S_ANNOTATION_PREFIX + '-net-crd'
K8S_ANNOTATION_NETPOLICY_CRD = K8S_ANNOTATION_PREFIX + '-netpolicy-crd'
+K8S_ANNOTATION_POLICY = K8S_ANNOTATION_PREFIX + '-counter'
K8S_ANNOTATION_NPWG_PREFIX = 'k8s.v1.cni.cncf.io'
K8S_ANNOTATION_NPWG_NETWORK = K8S_ANNOTATION_NPWG_PREFIX + '/networks'
@@ -59,7 +69,14 @@
K8S_ANNOTATION_CURRENT_DRIVER = 'current_driver'
K8S_ANNOTATION_NEUTRON_PORT = 'neutron_id'
+POD_FINALIZER = KURYR_FQDN + '/pod-finalizer'
KURYRNETWORK_FINALIZER = 'kuryrnetwork.finalizers.kuryr.openstack.org'
+KURYRLB_FINALIZER = 'kuryr.openstack.org/kuryrloadbalancer-finalizers'
+SERVICE_FINALIZER = 'kuryr.openstack.org/service-finalizer'
+NETWORKPOLICY_FINALIZER = 'kuryr.openstack.org/networkpolicy-finalizer'
+
+KURYRPORT_FINALIZER = KURYR_FQDN + '/kuryrport-finalizer'
+KURYRPORT_LABEL = KURYR_FQDN + '/nodeName'
K8S_OS_VIF_NOOP_PLUGIN = "noop"
diff --git a/kuryr_kubernetes/controller/drivers/base.py b/kuryr_kubernetes/controller/drivers/base.py
index 7121044b4..455d746c9 100644
--- a/kuryr_kubernetes/controller/drivers/base.py
+++ b/kuryr_kubernetes/controller/drivers/base.py
@@ -697,13 +697,10 @@ class NetworkPolicyDriver(DriverBase, metaclass=abc.ABCMeta):
ALIAS = 'network_policy'
@abc.abstractmethod
- def ensure_network_policy(self, policy, project_id):
+ def ensure_network_policy(self, policy):
"""Policy created or updated
:param policy: dict containing Kubernetes NP object
- :param project_id: openstack project_id
- :returns: list of Pod objects affected by the network policy
- creation or its podSelector modification
"""
raise NotImplementedError()
@@ -711,7 +708,7 @@ def ensure_network_policy(self, policy, project_id):
def release_network_policy(self, kuryrnetpolicy):
"""Delete a network policy
- :param kuryrnetpolicy: dict containing Kuryrnetpolicy CRD object
+ :param kuryrnetpolicy: dict containing NetworkPolicy object
"""
raise NotImplementedError()
@@ -729,18 +726,6 @@ def affected_pods(self, policy, selector=None):
"""
raise NotImplementedError()
- @abc.abstractmethod
- def knps_on_namespace(self, namespace):
- """Check if there si kuryr network policy CRDs on the namespace
-
- This method returns true if there are knps on the specified namespace
- or false otherwise
-
- :param namespace: namespace name where the knps CRDs should be
- :returns: true if knps CRDs on the namespace, false otherwise
- """
- raise NotImplementedError()
-
@abc.abstractmethod
def namespaced_pods(self, policy):
"""Return pods on the policy namespace
@@ -752,15 +737,6 @@ def namespaced_pods(self, policy):
"""
raise NotImplementedError()
- @abc.abstractmethod
- def get_kuryrnetpolicy_crd(self, policy):
- """Return kuryrnetpolicy CRD object associated to the policy
-
- :param policy: dict containing Kubernetes NP object
- :returns: kuryrnetpolicy CRD object associated to the policy
- """
- raise NotImplementedError()
-
class NetworkPolicyProjectDriver(DriverBase, metaclass=abc.ABCMeta):
"""Get an OpenStack project id for K8s network policies"""
diff --git a/kuryr_kubernetes/controller/drivers/lb_public_ip.py b/kuryr_kubernetes/controller/drivers/lb_public_ip.py
index 6fb24041e..4354a5758 100644
--- a/kuryr_kubernetes/controller/drivers/lb_public_ip.py
+++ b/kuryr_kubernetes/controller/drivers/lb_public_ip.py
@@ -15,7 +15,6 @@
from kuryr_kubernetes import config
from kuryr_kubernetes.controller.drivers import base
from kuryr_kubernetes.controller.drivers import public_ip
-from kuryr_kubernetes.objects import lbaas as obj_lbaas
from oslo_config import cfg
from oslo_log import log as logging
@@ -50,10 +49,11 @@ def acquire_service_pub_ip_info(self, spec_type, spec_lb_ip, project_id,
res_id = self._drv_pub_ip.is_ip_available(user_specified_ip,
port_id_to_be_associated)
if res_id:
- service_pub_ip_info = (obj_lbaas.LBaaSPubIp(
- ip_id=res_id,
- ip_addr=str(user_specified_ip),
- alloc_method='user'))
+ service_pub_ip_info = {
+ 'ip_id': res_id,
+ 'ip_addr': str(user_specified_ip),
+ 'alloc_method': 'user'
+ }
return service_pub_ip_info
else:
@@ -78,32 +78,34 @@ def acquire_service_pub_ip_info(self, spec_type, spec_lb_ip, project_id,
LOG.exception("Failed to allocate public IP - net_id:%s",
public_network_id)
return None
- service_pub_ip_info = obj_lbaas.LBaaSPubIp(ip_id=res_id,
- ip_addr=alloc_ip_addr,
- alloc_method='pool')
+ service_pub_ip_info = {
+ 'ip_id': res_id,
+ 'ip_addr': alloc_ip_addr,
+ 'alloc_method': 'pool'
+ }
return service_pub_ip_info
def release_pub_ip(self, service_pub_ip_info):
if not service_pub_ip_info:
return True
- if service_pub_ip_info.alloc_method == 'pool':
- retcode = self._drv_pub_ip.free_ip(service_pub_ip_info.ip_id)
+ if service_pub_ip_info['alloc_method'] == 'pool':
+ retcode = self._drv_pub_ip.free_ip(service_pub_ip_info['ip_id'])
if not retcode:
LOG.error("Failed to delete public_ip_id =%s !",
- service_pub_ip_info.ip_id)
+ service_pub_ip_info['ip_id'])
return False
return True
def associate_pub_ip(self, service_pub_ip_info, vip_port_id):
if (not service_pub_ip_info or
not vip_port_id or
- not service_pub_ip_info.ip_id):
+ not service_pub_ip_info['ip_id']):
return
self._drv_pub_ip.associate(
- service_pub_ip_info.ip_id, vip_port_id)
+ service_pub_ip_info['ip_id'], vip_port_id)
def disassociate_pub_ip(self, service_pub_ip_info):
- if not service_pub_ip_info or not service_pub_ip_info.ip_id:
+ if not service_pub_ip_info or not service_pub_ip_info['ip_id']:
return
- self._drv_pub_ip.disassociate(service_pub_ip_info.ip_id)
+ self._drv_pub_ip.disassociate(service_pub_ip_info['ip_id'])
diff --git a/kuryr_kubernetes/controller/drivers/lbaasv2.py b/kuryr_kubernetes/controller/drivers/lbaasv2.py
index 0f577c0ab..4eca391d8 100644
--- a/kuryr_kubernetes/controller/drivers/lbaasv2.py
+++ b/kuryr_kubernetes/controller/drivers/lbaasv2.py
@@ -27,7 +27,6 @@
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes.controller.drivers import base
from kuryr_kubernetes import exceptions as k_exc
-from kuryr_kubernetes.objects import lbaas as obj_lbaas
from kuryr_kubernetes import utils
CONF = cfg.CONF
@@ -112,7 +111,7 @@ def get_service_loadbalancer_name(self, namespace, svc_name):
return "%s/%s" % (namespace, svc_name)
def get_loadbalancer_pool_name(self, loadbalancer, namespace, svc_name):
- return "%s/%s/%s" % (loadbalancer.name, namespace, svc_name)
+ return "%s/%s/%s" % (loadbalancer['name'], namespace, svc_name)
def add_tags(self, resource, req):
if CONF.neutron_defaults.resource_tags:
@@ -126,9 +125,14 @@ def add_tags(self, resource, req):
def ensure_loadbalancer(self, name, project_id, subnet_id, ip,
security_groups_ids=None, service_type=None,
provider=None):
- request = obj_lbaas.LBaaSLoadBalancer(
- name=name, project_id=project_id, subnet_id=subnet_id, ip=ip,
- security_groups=security_groups_ids, provider=provider)
+ request = {
+ 'name': name,
+ 'project_id': project_id,
+ 'subnet_id': subnet_id,
+ 'ip': ip,
+ 'security_groups': security_groups_ids,
+ 'provider': provider
+ }
response = self._ensure(self._create_loadbalancer,
self._find_loadbalancer, request)
if not response:
@@ -146,9 +150,8 @@ def release_loadbalancer(self, loadbalancer):
loadbalancer,
loadbalancer,
lbaas.delete_load_balancer,
- loadbalancer.id,
+ loadbalancer['id'],
cascade=True)
-
self._wait_for_deletion(loadbalancer, _ACTIVATION_TIMEOUT)
def _create_listeners_acls(self, loadbalancer, port, target_port,
@@ -160,7 +163,7 @@ def _create_listeners_acls(self, loadbalancer, port, target_port,
if new_sgs:
sgs = new_sgs
else:
- sgs = loadbalancer.security_groups
+ sgs = loadbalancer['security_groups']
# Check if Network Policy allows listener on the pods
for sg in sgs:
@@ -210,7 +213,7 @@ def _apply_members_security_groups(self, loadbalancer, port, target_port,
if vip_port:
lb_sg = vip_port.security_group_ids[0]
else:
- LOG.debug("Skipping sg update for lb %s", loadbalancer.name)
+ LOG.debug("Skipping sg update for lb %s", loadbalancer['name'])
return
# NOTE (maysams) It might happen that the update of LBaaS SG
@@ -225,14 +228,14 @@ def _apply_members_security_groups(self, loadbalancer, port, target_port,
return
lbaas_sg_rules = os_net.security_group_rules(
- security_group_id=lb_sg, project_id=loadbalancer.project_id)
+ security_group_id=lb_sg, project_id=loadbalancer['project_id'])
all_pod_rules = []
add_default_rules = False
if new_sgs:
sgs = new_sgs
else:
- sgs = loadbalancer.security_groups
+ sgs = loadbalancer['security_groups']
sg_rule_ethertype = k_const.IPv4
if utils.get_service_subnet_version() == k_const.IP_VERSION_6:
@@ -325,12 +328,14 @@ def _is_default_rule(self, rule):
def ensure_listener(self, loadbalancer, protocol, port,
service_type='ClusterIP'):
- name = "%s:%s:%s" % (loadbalancer.name, protocol, port)
- listener = obj_lbaas.LBaaSListener(name=name,
- project_id=loadbalancer.project_id,
- loadbalancer_id=loadbalancer.id,
- protocol=protocol,
- port=port)
+ name = "%s:%s:%s" % (loadbalancer['name'], protocol, port)
+ listener = {
+ 'name': name,
+ 'project_id': loadbalancer['project_id'],
+ 'loadbalancer_id': loadbalancer['id'],
+ 'protocol': protocol,
+ 'port': port
+ }
try:
result = self._ensure_provisioned(
loadbalancer, listener, self._create_listener,
@@ -348,7 +353,7 @@ def ensure_listener(self, loadbalancer, protocol, port,
os_net = clients.get_network_client()
vip_port = self._get_vip_port(loadbalancer)
os_net.update_port(vip_port.id, security_groups=[])
- loadbalancer.security_groups = []
+ loadbalancer['security_groups'] = []
return result
@@ -357,7 +362,7 @@ def release_listener(self, loadbalancer, listener):
lbaas = clients.get_loadbalancer_client()
self._release(loadbalancer, listener,
lbaas.delete_listener,
- listener.id)
+ listener['id'])
# NOTE(maysams): since lbs created with ovn-octavia provider
# does not have a sg in place, only need to delete sg rules
@@ -367,19 +372,22 @@ def release_listener(self, loadbalancer, listener):
sg_id = self._get_vip_port(loadbalancer).security_group_ids[0]
if sg_id:
rules = os_net.security_group_rules(security_group_id=sg_id,
- description=listener.name)
+ description=listener['name'])
try:
os_net.delete_security_group_rule(next(rules).id)
except StopIteration:
LOG.warning('Cannot find SG rule for %s (%s) listener.',
- listener.id, listener.name)
+ listener['id'], listener['name'])
def ensure_pool(self, loadbalancer, listener):
- pool = obj_lbaas.LBaaSPool(name=listener.name,
- project_id=loadbalancer.project_id,
- loadbalancer_id=loadbalancer.id,
- listener_id=listener.id,
- protocol=listener.protocol)
+ pool = {
+ 'name': listener['name'],
+ 'project_id': loadbalancer['project_id'],
+ 'loadbalancer_id': loadbalancer['id'],
+ 'listener_id': listener['id'],
+ 'protocol': listener['protocol']
+ }
return self._ensure_provisioned(loadbalancer, pool,
self._create_pool,
self._find_pool)
@@ -388,30 +396,34 @@ def ensure_pool_attached_to_lb(self, loadbalancer, namespace,
svc_name, protocol):
name = self.get_loadbalancer_pool_name(loadbalancer,
namespace, svc_name)
- pool = obj_lbaas.LBaaSPool(name=name,
- project_id=loadbalancer.project_id,
- loadbalancer_id=loadbalancer.id,
- listener_id=None,
- protocol=protocol)
+ pool = {
+ 'name': name,
+ 'project_id': loadbalancer['project_id'],
+ 'loadbalancer_id': loadbalancer['id'],
+ 'listener_id': None,
+ 'protocol': protocol
+ }
return self._ensure_provisioned(loadbalancer, pool,
self._create_pool,
self._find_pool_by_name)
def release_pool(self, loadbalancer, pool):
lbaas = clients.get_loadbalancer_client()
- self._release(loadbalancer, pool, lbaas.delete_pool, pool.id)
+ self._release(loadbalancer, pool, lbaas.delete_pool, pool['id'])
def ensure_member(self, loadbalancer, pool,
subnet_id, ip, port, target_ref_namespace,
target_ref_name, listener_port=None):
name = ("%s/%s" % (target_ref_namespace, target_ref_name))
name += ":%s" % port
- member = obj_lbaas.LBaaSMember(name=name,
- project_id=loadbalancer.project_id,
- pool_id=pool.id,
- subnet_id=subnet_id,
- ip=ip,
- port=port)
+ member = {
+ 'name': name,
+ 'project_id': loadbalancer['project_id'],
+ 'pool_id': pool['id'],
+ 'subnet_id': subnet_id,
+ 'ip': ip,
+ 'port': port
+ }
result = self._ensure_provisioned(loadbalancer, member,
self._create_member,
self._find_member)
@@ -421,9 +433,9 @@ def ensure_member(self, loadbalancer, pool,
CONF.kubernetes.service_security_groups_driver == 'policy')
if (network_policy and CONF.octavia_defaults.enforce_sg_rules and
listener_port):
- protocol = pool.protocol
- sg_rule_name = pool.name
- listener_id = pool.listener_id
+ protocol = pool['protocol']
+ sg_rule_name = pool['name']
+ listener_id = pool['listener_id']
self._apply_members_security_groups(loadbalancer, listener_port,
port, protocol, sg_rule_name,
listener_id)
@@ -431,14 +443,14 @@ def ensure_member(self, loadbalancer, pool,
def release_member(self, loadbalancer, member):
lbaas = clients.get_loadbalancer_client()
- self._release(loadbalancer, member, lbaas.delete_member, member.id,
- member.pool_id)
+ self._release(loadbalancer, member, lbaas.delete_member, member['id'],
+ member['pool_id'])
def _get_vip_port(self, loadbalancer):
os_net = clients.get_network_client()
try:
- fixed_ips = ['subnet_id=%s' % str(loadbalancer.subnet_id),
- 'ip_address=%s' % str(loadbalancer.ip)]
+ fixed_ips = ['subnet_id=%s' % str(loadbalancer['subnet_id']),
+ 'ip_address=%s' % str(loadbalancer['ip'])]
ports = os_net.ports(fixed_ips=fixed_ips)
except os_exc.SDKException:
LOG.error("Port with fixed ips %s not found!", fixed_ips)
@@ -451,43 +463,43 @@ def _get_vip_port(self, loadbalancer):
def _create_loadbalancer(self, loadbalancer):
request = {
- 'name': loadbalancer.name,
- 'project_id': loadbalancer.project_id,
- 'vip_address': str(loadbalancer.ip),
- 'vip_subnet_id': loadbalancer.subnet_id,
+ 'name': loadbalancer['name'],
+ 'project_id': loadbalancer['project_id'],
+ 'vip_address': str(loadbalancer['ip']),
+ 'vip_subnet_id': loadbalancer['subnet_id'],
}
- if loadbalancer.provider is not None:
- request['provider'] = loadbalancer.provider
+ if loadbalancer['provider'] is not None:
+ request['provider'] = loadbalancer['provider']
self.add_tags('loadbalancer', request)
lbaas = clients.get_loadbalancer_client()
response = lbaas.create_load_balancer(**request)
- loadbalancer.id = response.id
- loadbalancer.port_id = self._get_vip_port(loadbalancer).id
- if (loadbalancer.provider is not None and
- loadbalancer.provider != response.provider):
+ loadbalancer['id'] = response.id
+ loadbalancer['port_id'] = self._get_vip_port(loadbalancer).id
+ if (loadbalancer['provider'] is not None and
+ loadbalancer['provider'] != response.provider):
LOG.error("Request provider(%s) != Response provider(%s)",
- loadbalancer.provider, response.provider)
+ loadbalancer['provider'], response.provider)
return None
- loadbalancer.provider = response.provider
+ loadbalancer['provider'] = response.provider
return loadbalancer
def _find_loadbalancer(self, loadbalancer):
lbaas = clients.get_loadbalancer_client()
response = lbaas.load_balancers(
- name=loadbalancer.name,
- project_id=loadbalancer.project_id,
- vip_address=str(loadbalancer.ip),
- vip_subnet_id=loadbalancer.subnet_id,
- provider=loadbalancer.provider)
+ name=loadbalancer['name'],
+ project_id=loadbalancer['project_id'],
+ vip_address=str(loadbalancer['ip']),
+ vip_subnet_id=loadbalancer['subnet_id'],
+ provider=loadbalancer['provider'])
try:
os_lb = next(response) # openstacksdk returns a generator
- loadbalancer.id = os_lb.id
- loadbalancer.port_id = self._get_vip_port(loadbalancer).id
- loadbalancer.provider = os_lb.provider
+ loadbalancer['id'] = os_lb.id
+ loadbalancer['port_id'] = self._get_vip_port(loadbalancer).id
+ loadbalancer['provider'] = os_lb.provider
if os_lb.provisioning_status == 'ERROR':
self.release_loadbalancer(loadbalancer)
return None
@@ -498,16 +510,16 @@ def _find_loadbalancer(self, loadbalancer):
def _create_listener(self, listener):
request = {
- 'name': listener.name,
- 'project_id': listener.project_id,
- 'loadbalancer_id': listener.loadbalancer_id,
- 'protocol': listener.protocol,
- 'protocol_port': listener.port,
+ 'name': listener['name'],
+ 'project_id': listener['project_id'],
+ 'loadbalancer_id': listener['loadbalancer_id'],
+ 'protocol': listener['protocol'],
+ 'protocol_port': listener['port'],
}
self.add_tags('listener', request)
lbaas = clients.get_loadbalancer_client()
response = lbaas.create_listener(**request)
- listener.id = response.id
+ listener['id'] = response.id
return listener
def _update_listener_acls(self, loadbalancer, listener_id, allowed_cidrs):
@@ -538,15 +550,15 @@ def _update_listener_acls(self, loadbalancer, listener_id, allowed_cidrs):
def _find_listener(self, listener, loadbalancer):
lbaas = clients.get_loadbalancer_client()
response = lbaas.listeners(
- name=listener.name,
- project_id=listener.project_id,
- load_balancer_id=listener.loadbalancer_id,
- protocol=listener.protocol,
- protocol_port=listener.port)
+ name=listener['name'],
+ project_id=listener['project_id'],
+ load_balancer_id=listener['loadbalancer_id'],
+ protocol=listener['protocol'],
+ protocol_port=listener['port'])
try:
os_listener = next(response)
- listener.id = os_listener.id
+ listener['id'] = os_listener.id
if os_listener.provisioning_status == 'ERROR':
LOG.debug("Releasing listener %s", os_listener.id)
self.release_listener(loadbalancer, listener)
@@ -560,34 +572,34 @@ def _create_pool(self, pool):
# TODO(ivc): make lb_algorithm configurable
lb_algorithm = CONF.octavia_defaults.lb_algorithm
request = {
- 'name': pool.name,
- 'project_id': pool.project_id,
- 'listener_id': pool.listener_id,
- 'loadbalancer_id': pool.loadbalancer_id,
- 'protocol': pool.protocol,
+ 'name': pool['name'],
+ 'project_id': pool['project_id'],
+ 'listener_id': pool['listener_id'],
+ 'loadbalancer_id': pool['loadbalancer_id'],
+ 'protocol': pool['protocol'],
'lb_algorithm': lb_algorithm,
}
self.add_tags('pool', request)
lbaas = clients.get_loadbalancer_client()
response = lbaas.create_pool(**request)
- pool.id = response.id
+ pool['id'] = response.id
return pool
def _find_pool(self, pool, loadbalancer, by_listener=True):
lbaas = clients.get_loadbalancer_client()
response = lbaas.pools(
- name=pool.name,
- project_id=pool.project_id,
- loadbalancer_id=pool.loadbalancer_id,
- protocol=pool.protocol)
-
+ name=pool['name'],
+ project_id=pool['project_id'],
+ loadbalancer_id=pool['loadbalancer_id'],
+ protocol=pool['protocol'])
+ # TODO(scavnic) check response
try:
if by_listener:
- pools = [p for p in response if pool.listener_id
+ pools = [p for p in response if pool['listener_id']
in {listener['id'] for listener in p.listeners}]
else:
- pools = [p for p in response if pool.name == p.name]
+ pools = [p for p in response if pool['name'] == p.name]
- pool.id = pools[0].id
+ pool['id'] = pools[0].id
if pools[0].provisioning_status == 'ERROR':
LOG.debug("Releasing pool %s", pool.id)
self.release_pool(loadbalancer, pool)
@@ -601,31 +613,31 @@ def _find_pool_by_name(self, pool, loadbalancer):
def _create_member(self, member):
request = {
- 'name': member.name,
- 'project_id': member.project_id,
- 'subnet_id': member.subnet_id,
- 'address': str(member.ip),
- 'protocol_port': member.port,
+ 'name': member['name'],
+ 'project_id': member['project_id'],
+ 'subnet_id': member['subnet_id'],
+ 'address': str(member['ip']),
+ 'protocol_port': member['port'],
}
self.add_tags('member', request)
lbaas = clients.get_loadbalancer_client()
- response = lbaas.create_member(member.pool_id, **request)
- member.id = response.id
+ response = lbaas.create_member(member['pool_id'], **request)
+ member['id'] = response.id
return member
def _find_member(self, member, loadbalancer):
lbaas = clients.get_loadbalancer_client()
response = lbaas.members(
- member.pool_id,
- name=member.name,
- project_id=member.project_id,
- subnet_id=member.subnet_id,
- address=member.ip,
- protocol_port=member.port)
+ member['pool_id'],
+ name=member['name'],
+ project_id=member['project_id'],
+ subnet_id=member['subnet_id'],
+ address=member['ip'],
+ protocol_port=member['port'])
try:
os_members = next(response)
- member.id = os_members.id
+ member['id'] = os_members.id
if os_members.provisioning_status == 'ERROR':
LOG.debug("Releasing Member %s", os_members.id)
self.release_member(loadbalancer, member)
@@ -683,7 +695,7 @@ def _wait_for_provisioning(self, loadbalancer, timeout,
lbaas = clients.get_loadbalancer_client()
for remaining in self._provisioning_timer(timeout, interval):
- response = lbaas.get_load_balancer(loadbalancer.id)
+ response = lbaas.get_load_balancer(loadbalancer['id'])
status = response.provisioning_status
if status == 'ACTIVE':
LOG.debug("Provisioning complete for %(lb)s", {
@@ -691,7 +703,7 @@ def _wait_for_provisioning(self, loadbalancer, timeout,
return
elif status == 'ERROR':
LOG.debug("Releasing loadbalancer %s with error status",
- loadbalancer.id)
+ loadbalancer['id'])
self.release_loadbalancer(loadbalancer)
break
else:
@@ -708,7 +720,7 @@ def _wait_for_deletion(self, loadbalancer, timeout,
for remaining in self._provisioning_timer(timeout, interval):
try:
- lbaas.get_load_balancer(loadbalancer.id)
+ lbaas.get_load_balancer(loadbalancer['id'])
except os_exc.NotFoundException:
return
@@ -737,24 +749,41 @@ def update_lbaas_sg(self, service, sgs):
endpoints_link = utils.get_endpoints_link(service)
k8s = clients.get_kubernetes_client()
try:
- endpoint = k8s.get(endpoints_link)
+ k8s.get(endpoints_link)
except k_exc.K8sResourceNotFound:
LOG.debug("Endpoint not Found. Skipping LB SG update for "
"%s as the LB resources are not present", lbaas_name)
return
- lbaas = utils.get_lbaas_state(endpoint)
- if not lbaas:
- LOG.debug('Endpoint not yet annotated with lbaas state.')
+ try:
+ klb = k8s.get(f'{k_const.K8S_API_CRD_NAMESPACES}/{svc_namespace}/'
+ f'kuryrloadbalancers/{svc_name}')
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('No KuryrLoadBalancer for service %s created yet.',
+ lbaas_name)
+ raise k_exc.ResourceNotReady(svc_name)
+
+ if (not klb.get('status', {}).get('loadbalancer') or
+ klb.get('status', {}).get('listeners') is None):
+ LOG.debug('KuryrLoadBalancer for service %s not populated yet.',
+ lbaas_name)
raise k_exc.ResourceNotReady(svc_name)
- lbaas_obj = lbaas.loadbalancer
- lbaas_obj.security_groups = sgs
+ klb['status']['loadbalancer']['security_groups'] = sgs
- utils.set_lbaas_state(endpoint, lbaas)
+ lb = klb['status']['loadbalancer']
+ try:
+ k8s.patch_crd('status/loadbalancer', klb['metadata']['selfLink'],
+ {'security_groups': sgs})
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('KuryrLoadBalancer CRD not found %s', lbaas_name)
+ return
+ except k_exc.K8sClientException:
+ LOG.exception('Error updating KuryrLoadBalancer CRD %s', lbaas_name)
+ raise
- lsnr_ids = {(listener.protocol, listener.port): listener.id
- for listener in lbaas.listeners}
+ lsnr_ids = {(listener['protocol'], listener['port']): listener['id']
+ for listener in klb['status']['listeners']}
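+ # NOTE: lsnr_ids maps (protocol, port) pairs to listener IDs so that
+ # each service port below can be matched to the right listener.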
for port in svc_ports:
port_protocol = port['protocol']
@@ -767,6 +796,6 @@ def update_lbaas_sg(self, service, sgs):
"%s and port %s. Skipping", port_protocol,
lbaas_port)
continue
- self._apply_members_security_groups(lbaas_obj, lbaas_port,
+ self._apply_members_security_groups(lb, lbaas_port,
target_port, port_protocol,
sg_rule_name, listener_id, sgs)
diff --git a/kuryr_kubernetes/controller/drivers/network_policy.py b/kuryr_kubernetes/controller/drivers/network_policy.py
index a8089e9eb..b92da637e 100644
--- a/kuryr_kubernetes/controller/drivers/network_policy.py
+++ b/kuryr_kubernetes/controller/drivers/network_policy.py
@@ -38,91 +38,94 @@ def __init__(self):
self.os_net = clients.get_network_client()
self.kubernetes = clients.get_kubernetes_client()
- def ensure_network_policy(self, policy, project_id):
+ def ensure_network_policy(self, policy):
"""Create security group rules out of network policies
Triggered by events from network policies, this method ensures that
- security groups and security group rules are created or updated in
- reaction to kubernetes network policies events.
-
- In addition it returns the pods affected by the policy:
- - Creation: pods on the namespace of the created policy
- - Update: pods that needs to be updated in case of PodSelector
- modification, i.e., the pods that were affected by the previous
- PodSelector
+ a KuryrNetworkPolicy object is created with the security group rule
+ definitions required to represent the NetworkPolicy.
"""
LOG.debug("Creating network policy %s", policy['metadata']['name'])
- if self.get_kuryrnetpolicy_crd(policy):
- previous_selector = (
- self.update_security_group_rules_from_network_policy(policy))
- if previous_selector or previous_selector == {}:
- return self.affected_pods(policy, previous_selector)
- if previous_selector is None:
- return self.namespaced_pods(policy)
+ i_rules, e_rules = self._get_security_group_rules_from_network_policy(
+ policy)
+
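+ # NOTE: create the KuryrNetworkPolicy CRD if it does not exist yet,
+ # otherwise just patch its spec; the actual Neutron SG changes are
+ # left to the KuryrNetworkPolicy handler.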
+ knp = self._get_knp_crd(policy)
+ if not knp:
+ self._create_knp_crd(policy, i_rules, e_rules)
else:
- self.create_security_group_rules_from_network_policy(policy,
- project_id)
+ self._patch_knp_crd(policy, i_rules, e_rules, knp)
+
+ def _convert_old_sg_rule(self, rule):
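+ # NOTE: drop the Neutron-generated fields; the new CRD format keeps
+ # only the rule definition itself under 'sgRule'.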
+ del rule['security_group_rule']['id']
+ del rule['security_group_rule']['security_group_id']
+ result = {
+ 'sgRule': rule['security_group_rule'],
+ }
+
+ if 'namespace' in rule:
+ result['namespace'] = rule['namespace']
+
+ if 'remote_ip_prefixes' in rule:
+ result['affectedPods'] = []
+ for ip, namespace in rule['remote_ip_prefixes'].items():
+ result['affectedPods'].append({
+ 'podIP': ip,
+ 'podNamespace': namespace,
+ })
+
+ return result
+
+ def get_from_old_crd(self, netpolicy):
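+ # NOTE: converts a legacy KuryrNetPolicy CRD into the equivalent
+ # KuryrNetworkPolicy so that policies created before the upgrade
+ # keep working.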
+ name = netpolicy['metadata']['name'][3:] # Remove 'np-'
+ namespace = netpolicy['metadata']['namespace']
+ link = (f'{constants.K8S_API_NETWORKING}/namespaces/{namespace}/'
+ f'networkpolicies/{name}')
+ knp = {
+ 'apiVersion': constants.K8S_API_CRD_VERSION,
+ 'kind': constants.K8S_OBJ_KURYRNETWORKPOLICY,
+ 'metadata': {
+ 'namespace': namespace,
+ 'name': name,
+ 'annotations': {
+ 'networkPolicyLink': link,
+ },
+ 'finalizers': [constants.NETWORKPOLICY_FINALIZER],
+ },
+ 'spec': {
+ 'podSelector':
+ netpolicy['spec']['networkpolicy_spec']['podSelector'],
+ 'egressSgRules': [self._convert_old_sg_rule(r) for r in
+ netpolicy['spec']['egressSgRules']],
+ 'ingressSgRules': [self._convert_old_sg_rule(r) for r in
+ netpolicy['spec']['ingressSgRules']],
+ 'policyTypes':
+ netpolicy['spec']['networkpolicy_spec']['policyTypes'],
+ },
+ 'status': {
+ 'podSelector': netpolicy['spec']['podSelector'],
+ 'securityGroupId': netpolicy['spec']['securityGroupId'],
+ # We'll just let KuryrNetworkPolicyHandler figure out if rules
+ # are created on its own.
+ 'securityGroupRules': [],
+ },
+ }
+
+ return knp
- def update_security_group_rules_from_network_policy(self, policy):
- """Update security group rules
+ def _get_security_group_rules_from_network_policy(self, policy):
+ """Get security group rules required to represent an NP
- This method updates security group rules based on CRUD events gotten
- from a configuration or patch to an existing network policy
+ This method creates the security group rule bodies that result from
+ parsing a network policy.
"""
- crd = self.get_kuryrnetpolicy_crd(policy)
- crd_name = crd['metadata']['name']
- LOG.debug("Already existing CRD %s", crd_name)
- sg_id = crd['spec']['securityGroupId']
- # Fetch existing SG rules from kuryrnetpolicy CRD
- existing_sg_rules = []
- existing_i_rules = crd['spec'].get('ingressSgRules')
- existing_e_rules = crd['spec'].get('egressSgRules')
- if existing_i_rules or existing_e_rules:
- existing_sg_rules = existing_i_rules + existing_e_rules
- existing_pod_selector = crd['spec'].get('podSelector')
- # Parse network policy update and get new ruleset
- i_rules, e_rules = self.parse_network_policy_rules(policy, sg_id)
- current_sg_rules = i_rules + e_rules
- # Get existing security group rules ids
- sgr_ids = [x['security_group_rule'].pop('id') for x in
- existing_sg_rules]
- # SG rules that are meant to be kept get their id back
- sg_rules_to_keep = [existing_sg_rules.index(rule) for rule in
- existing_sg_rules if rule in current_sg_rules]
- for sg_rule in sg_rules_to_keep:
- sgr_id = sgr_ids[sg_rule]
- existing_sg_rules[sg_rule]['security_group_rule']['id'] = sgr_id
- # Delete SG rules that are no longer in the updated policy
- sg_rules_to_delete = [existing_sg_rules.index(rule) for rule in
- existing_sg_rules if rule not in
- current_sg_rules]
- for sg_rule in sg_rules_to_delete:
- driver_utils.delete_security_group_rule(sgr_ids[sg_rule])
- # Create new rules that weren't already on the security group
- sg_rules_to_add = [rule for rule in current_sg_rules if rule not in
- existing_sg_rules]
- for sg_rule in sg_rules_to_add:
- sgr_id = driver_utils.create_security_group_rule(sg_rule)
- if sg_rule['security_group_rule'].get('direction') == 'ingress':
- for i_rule in i_rules:
- if sg_rule == i_rule:
- i_rule["security_group_rule"]["id"] = sgr_id
- else:
- for e_rule in e_rules:
- if sg_rule == e_rule:
- e_rule["security_group_rule"]["id"] = sgr_id
- # Annotate kuryrnetpolicy CRD with current policy and ruleset
- pod_selector = policy['spec'].get('podSelector')
- driver_utils.patch_kuryrnetworkpolicy_crd(crd, i_rules, e_rules,
- pod_selector,
- np_spec=policy['spec'])
+ i_rules, e_rules = self.parse_network_policy_rules(policy)
+ # Add default rules to allow traffic from host and svc subnet
+ i_rules += self._get_default_np_rules()
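+ # NOTE: only the rule bodies are produced here; creating the actual
+ # Neutron rules is deferred to the KuryrNetworkPolicy handler.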
- if existing_pod_selector != pod_selector:
- return existing_pod_selector
- return False
+ return i_rules, e_rules
- def _add_default_np_rules(self, sg_id):
+ def _get_default_np_rules(self):
"""Add extra SG rule to allow traffic from svcs and host.
This method adds the base security group rules for the NP security
@@ -130,6 +133,7 @@ def _add_default_np_rules(self, sg_id):
- Ensure traffic is allowed from the services subnet
- Ensure traffic is allowed from the host
"""
+ rules = []
default_cidrs = []
if CONF.octavia_defaults.enforce_sg_rules:
default_cidrs.append(utils.get_subnet_cidr(
@@ -141,27 +145,21 @@ def _add_default_np_rules(self, sg_id):
ethertype = constants.IPv4
if ipaddress.ip_network(cidr).version == constants.IP_VERSION_6:
ethertype = constants.IPv6
- default_rule = {
- 'security_group_rule': {
+ rules.append({
+ 'sgRule': {
'ethertype': ethertype,
- 'security_group_id': sg_id,
'direction': 'ingress',
'description': 'Kuryr-Kubernetes NetPolicy SG rule',
- 'remote_ip_prefix': cidr
- }}
- driver_utils.create_security_group_rule(default_rule)
+ 'remote_ip_prefix': cidr,
+ }})
- def create_security_group_rules_from_network_policy(self, policy,
- project_id):
- """Create initial security group and rules
+ return rules
- This method creates the initial security group for hosting security
- group rules coming out of network policies' parsing.
- """
- sg_name = ("sg-" + policy['metadata']['namespace'] + "-" +
- policy['metadata']['name'])
- desc = "Kuryr-Kubernetes NetPolicy SG"
- sg = None
+ def create_security_group(self, knp, project_id):
+ sg_name = ("sg-" + knp['metadata']['namespace'] + "-" +
+ knp['metadata']['name'])
+ desc = ("Kuryr-Kubernetes Network Policy %s SG" %
+ utils.get_res_unique_name(knp))
try:
# Create initial security group
sg = self.os_net.create_security_group(name=sg_name,
@@ -176,46 +174,14 @@ def create_security_group_rules_from_network_policy(self, policy,
# rules just after creation.
for sgr in sg.security_group_rules:
self.os_net.delete_security_group_rule(sgr['id'])
-
- i_rules, e_rules = self.parse_network_policy_rules(policy, sg.id)
- for i_rule in i_rules:
- sgr_id = driver_utils.create_security_group_rule(i_rule)
- i_rule['security_group_rule']['id'] = sgr_id
-
- for e_rule in e_rules:
- sgr_id = driver_utils.create_security_group_rule(e_rule)
- e_rule['security_group_rule']['id'] = sgr_id
-
- # Add default rules to allow traffic from host and svc subnet
- self._add_default_np_rules(sg.id)
except (os_exc.SDKException, exceptions.ResourceNotReady):
LOG.exception("Error creating security group for network policy "
- " %s", policy['metadata']['name'])
- # If there's any issue creating sg rules, remove them
- if sg:
- self.os_net.delete_security_group(sg.id)
+ " %s", knp['metadata']['name'])
raise
- try:
- self._add_kuryrnetpolicy_crd(policy, project_id, sg.id, i_rules,
- e_rules)
- except exceptions.K8sClientException:
- LOG.exception("Rolling back security groups")
- # Same with CRD creation
- self.os_net.delete_security_group(sg.id)
- raise
-
- try:
- crd = self.get_kuryrnetpolicy_crd(policy)
- self.kubernetes.annotate(policy['metadata']['selfLink'],
- {"kuryrnetpolicy_selfLink":
- crd['metadata']['selfLink']})
- except exceptions.K8sClientException:
- LOG.exception('Error annotating network policy')
- raise
+ return sg.id
- def _get_pods(self, pod_selector, namespace=None,
- namespace_selector=None):
+ def _get_pods(self, pod_selector, namespace=None, namespace_selector=None):
matching_pods = {"items": []}
if namespace_selector:
matching_namespaces = driver_utils.get_namespaces(
@@ -232,7 +198,6 @@ def _get_namespaces(self, namespace_selector, namespace=None):
if not namespace_selector and namespace:
matching_namespaces.append(self.kubernetes.get(
'{}/namespaces/{}'.format(constants.K8S_API_BASE, namespace)))
-
else:
matching_namespaces.extend(driver_utils.get_namespaces(
namespace_selector).get('items'))
@@ -285,7 +250,7 @@ def _parse_selectors(self, rule_block, rule_direction, policy_namespace):
def _create_sg_rules_with_container_ports(
self, container_ports, allow_all, resource, matched_pods,
- crd_rules, sg_id, direction, port, pod_selector=None,
+ crd_rules, direction, port, pod_selector=None,
policy_namespace=None):
cidr, ns = self._get_resource_details(resource)
for pod, container_port in container_ports:
@@ -308,18 +273,18 @@ def _create_sg_rules_with_container_ports(
if not allow_all and matched_pods and cidr:
for container_port, pods in matched_pods.items():
sg_rule = driver_utils.create_security_group_rule_body(
- sg_id, direction, container_port,
+ direction, container_port,
protocol=port.get('protocol'),
cidr=cidr, pods=pods)
if sg_rule not in crd_rules:
crd_rules.append(sg_rule)
if direction == 'egress':
self._create_svc_egress_sg_rule(
- sg_id, policy_namespace, crd_rules,
+ policy_namespace, crd_rules,
resource=resource, port=container_port,
protocol=port.get('protocol'))
- def _create_sg_rule_body_on_text_port(self, sg_id, direction, port,
+ def _create_sg_rule_body_on_text_port(self, direction, port,
resources, crd_rules, pod_selector,
policy_namespace, allow_all=False):
"""Create SG rules when named port is used in the NP rule
@@ -352,7 +317,7 @@ def _create_sg_rule_body_on_text_port(self, sg_id, direction, port,
for resource in resources:
self._create_sg_rules_with_container_ports(
container_ports, allow_all, resource, matched_pods,
- crd_rules, sg_id, direction, port)
+ crd_rules, direction, port)
elif direction == "egress":
for resource in resources:
# NOTE(maysams) Skipping objects that refers to ipblocks
@@ -364,24 +329,24 @@ def _create_sg_rule_body_on_text_port(self, sg_id, direction, port,
container_ports = driver_utils.get_ports(resource, port)
self._create_sg_rules_with_container_ports(
container_ports, allow_all, resource, matched_pods,
- crd_rules, sg_id, direction, port, pod_selector,
+ crd_rules, direction, port, pod_selector,
policy_namespace)
if allow_all:
container_port = None
for container_port, pods in matched_pods.items():
for ethertype in (constants.IPv4, constants.IPv6):
sg_rule = driver_utils.create_security_group_rule_body(
- sg_id, direction, container_port,
+ direction, container_port,
protocol=port.get('protocol'),
ethertype=ethertype,
pods=pods)
crd_rules.append(sg_rule)
if direction == 'egress':
self._create_svc_egress_sg_rule(
- sg_id, policy_namespace, crd_rules,
+ policy_namespace, crd_rules,
port=container_port, protocol=port.get('protocol'))
- def _create_sg_rule_on_number_port(self, allowed_resources, sg_id,
+ def _create_sg_rule_on_number_port(self, allowed_resources,
direction, port, sg_rule_body_list,
policy_namespace):
for resource in allowed_resources:
@@ -393,52 +358,51 @@ def _create_sg_rule_on_number_port(self, allowed_resources, sg_id,
continue
sg_rule = (
driver_utils.create_security_group_rule_body(
- sg_id, direction, port.get('port'),
+ direction, port.get('port'),
protocol=port.get('protocol'),
cidr=cidr,
namespace=ns))
sg_rule_body_list.append(sg_rule)
if direction == 'egress':
self._create_svc_egress_sg_rule(
- sg_id, policy_namespace, sg_rule_body_list,
+ policy_namespace, sg_rule_body_list,
resource=resource, port=port.get('port'),
protocol=port.get('protocol'))
- def _create_all_pods_sg_rules(self, port, sg_id, direction,
+ def _create_all_pods_sg_rules(self, port, direction,
sg_rule_body_list, pod_selector,
policy_namespace):
if type(port.get('port')) is not int:
all_pods = driver_utils.get_namespaced_pods().get('items')
self._create_sg_rule_body_on_text_port(
- sg_id, direction, port, all_pods,
+ direction, port, all_pods,
sg_rule_body_list, pod_selector, policy_namespace,
allow_all=True)
else:
for ethertype in (constants.IPv4, constants.IPv6):
sg_rule = (
driver_utils.create_security_group_rule_body(
- sg_id, direction, port.get('port'),
+ direction, port.get('port'),
ethertype=ethertype,
protocol=port.get('protocol')))
sg_rule_body_list.append(sg_rule)
if direction == 'egress':
self._create_svc_egress_sg_rule(
- sg_id, policy_namespace, sg_rule_body_list,
+ policy_namespace, sg_rule_body_list,
port=port.get('port'),
protocol=port.get('protocol'))
- def _create_default_sg_rule(self, sg_id, direction, sg_rule_body_list):
+ def _create_default_sg_rule(self, direction, sg_rule_body_list):
for ethertype in (constants.IPv4, constants.IPv6):
default_rule = {
- 'security_group_rule': {
+ 'sgRule': {
'ethertype': ethertype,
- 'security_group_id': sg_id,
'direction': direction,
'description': 'Kuryr-Kubernetes NetPolicy SG rule',
}}
sg_rule_body_list.append(default_rule)
- def _parse_sg_rules(self, sg_rule_body_list, direction, policy, sg_id):
+ def _parse_sg_rules(self, sg_rule_body_list, direction, policy):
"""Parse policy into security group rules.
This method inspects the policy object and create the equivalent
@@ -460,16 +424,14 @@ def _parse_sg_rules(self, sg_rule_body_list, direction, policy, sg_id):
# traffic as NP policy is not affecting ingress
LOG.debug('Applying default all open for ingress for '
'policy %s', policy['metadata']['selfLink'])
- self._create_default_sg_rule(
- sg_id, direction, sg_rule_body_list)
+ self._create_default_sg_rule(direction, sg_rule_body_list)
elif direction == 'egress':
if policy_types and 'Egress' not in policy_types:
# NOTE(ltomasbo): add default rule to enable all egress
# traffic as NP policy is not affecting egress
LOG.debug('Applying default all open for egress for '
'policy %s', policy['metadata']['selfLink'])
- self._create_default_sg_rule(
- sg_id, direction, sg_rule_body_list)
+ self._create_default_sg_rule(direction, sg_rule_body_list)
else:
LOG.warning('Not supported policyType at network policy %s',
policy['metadata']['selfLink'])
@@ -487,7 +449,7 @@ def _parse_sg_rules(self, sg_rule_body_list, direction, policy, sg_id):
policy['metadata']['selfLink'])
for ethertype in (constants.IPv4, constants.IPv6):
rule = driver_utils.create_security_group_rule_body(
- sg_id, direction, ethertype=ethertype)
+ direction, ethertype=ethertype)
sg_rule_body_list.append(rule)
for rule_block in rule_list:
@@ -519,20 +481,20 @@ def _parse_sg_rules(self, sg_rule_body_list, direction, policy, sg_id):
if allowed_resources or allow_all or selectors:
if type(port.get('port')) is not int:
self._create_sg_rule_body_on_text_port(
- sg_id, direction, port, allowed_resources,
+ direction, port, allowed_resources,
sg_rule_body_list, pod_selector,
policy_namespace)
else:
self._create_sg_rule_on_number_port(
- allowed_resources, sg_id, direction, port,
+ allowed_resources, direction, port,
sg_rule_body_list, policy_namespace)
if allow_all:
self._create_all_pods_sg_rules(
- port, sg_id, direction, sg_rule_body_list,
+ port, direction, sg_rule_body_list,
pod_selector, policy_namespace)
else:
self._create_all_pods_sg_rules(
- port, sg_id, direction, sg_rule_body_list,
+ port, direction, sg_rule_body_list,
pod_selector, policy_namespace)
elif allowed_resources or allow_all or selectors:
for resource in allowed_resources:
@@ -543,27 +505,27 @@ def _parse_sg_rules(self, sg_rule_body_list, direction, policy, sg_id):
if not cidr:
continue
rule = driver_utils.create_security_group_rule_body(
- sg_id, direction,
+ direction,
port_range_min=1,
port_range_max=65535,
cidr=cidr,
namespace=namespace)
sg_rule_body_list.append(rule)
if direction == 'egress':
- rule = self._create_svc_egress_sg_rule(
- sg_id, policy_namespace, sg_rule_body_list,
+ self._create_svc_egress_sg_rule(
+ policy_namespace, sg_rule_body_list,
resource=resource)
if allow_all:
for ethertype in (constants.IPv4, constants.IPv6):
rule = driver_utils.create_security_group_rule_body(
- sg_id, direction,
+ direction,
port_range_min=1,
port_range_max=65535,
ethertype=ethertype)
sg_rule_body_list.append(rule)
if direction == 'egress':
- self._create_svc_egress_sg_rule(
- sg_id, policy_namespace, sg_rule_body_list)
+ self._create_svc_egress_sg_rule(policy_namespace,
+ sg_rule_body_list)
else:
LOG.debug('This network policy specifies no %(direction)s '
'%(rule_direction)s and no ports: %(policy)s',
@@ -571,15 +533,14 @@ def _parse_sg_rules(self, sg_rule_body_list, direction, policy, sg_id):
'rule_direction': rule_direction,
'policy': policy['metadata']['selfLink']})
- def _create_svc_egress_sg_rule(self, sg_id, policy_namespace,
- sg_rule_body_list, resource=None,
- port=None, protocol=None):
+ def _create_svc_egress_sg_rule(self, policy_namespace, sg_rule_body_list,
+ resource=None, port=None, protocol=None):
services = driver_utils.get_services()
if not resource:
svc_subnet = utils.get_subnet_cidr(
CONF.neutron_defaults.service_subnet)
rule = driver_utils.create_security_group_rule_body(
- sg_id, 'egress', port, protocol=protocol, cidr=svc_subnet)
+ 'egress', port, protocol=protocol, cidr=svc_subnet)
if rule not in sg_rule_body_list:
sg_rule_body_list.append(rule)
return
@@ -613,7 +574,7 @@ def _create_svc_egress_sg_rule(self, sg_id, policy_namespace,
if not cluster_ip:
continue
rule = driver_utils.create_security_group_rule_body(
- sg_id, 'egress', port, protocol=protocol,
+ 'egress', port, protocol=protocol,
cidr=cluster_ip)
if rule not in sg_rule_body_list:
sg_rule_body_list.append(rule)
@@ -626,7 +587,7 @@ def _pods_in_ip_block(self, pods, resource):
return True
return False
- def parse_network_policy_rules(self, policy, sg_id):
+ def parse_network_policy_rules(self, policy):
"""Create security group rule bodies out of network policies.
Whenever a notification from the handler 'on-present' method is
@@ -637,10 +598,8 @@ def parse_network_policy_rules(self, policy, sg_id):
ingress_sg_rule_body_list = []
egress_sg_rule_body_list = []
- self._parse_sg_rules(ingress_sg_rule_body_list, 'ingress', policy,
- sg_id)
- self._parse_sg_rules(egress_sg_rule_body_list, 'egress', policy,
- sg_id)
+ self._parse_sg_rules(ingress_sg_rule_body_list, 'ingress', policy)
+ self._parse_sg_rules(egress_sg_rule_body_list, 'egress', policy)
return ingress_sg_rule_body_list, egress_sg_rule_body_list
@@ -657,19 +616,15 @@ def delete_np_sg(self, sg_id):
LOG.exception("Error deleting security group %s.", sg_id)
raise
- def release_network_policy(self, netpolicy_crd):
- if netpolicy_crd is not None:
- self.delete_np_sg(netpolicy_crd['spec']['securityGroupId'])
- self._del_kuryrnetpolicy_crd(
- netpolicy_crd['metadata']['name'],
- netpolicy_crd['metadata']['namespace'])
+ def release_network_policy(self, policy):
+ return self._del_knp_crd(policy)
- def get_kuryrnetpolicy_crd(self, policy):
- netpolicy_crd_name = "np-" + policy['metadata']['name']
+ def _get_knp_crd(self, policy):
+ netpolicy_crd_name = policy['metadata']['name']
netpolicy_crd_namespace = policy['metadata']['namespace']
try:
netpolicy_crd = self.kubernetes.get(
- '{}/{}/kuryrnetpolicies/{}'.format(
+ '{}/{}/kuryrnetworkpolicies/{}'.format(
constants.K8S_API_CRD_NAMESPACES, netpolicy_crd_namespace,
netpolicy_crd_name))
except exceptions.K8sResourceNotFound:
@@ -679,77 +634,81 @@ def get_kuryrnetpolicy_crd(self, policy):
raise
return netpolicy_crd
- def knps_on_namespace(self, namespace):
- try:
- netpolicy_crds = self.kubernetes.get(
- '{}/{}/kuryrnetpolicies'.format(
- constants.K8S_API_CRD_NAMESPACES,
- namespace))
- except exceptions.K8sClientException:
- LOG.exception("Kubernetes Client Exception.")
- raise
- if netpolicy_crds.get('items'):
- return True
- return False
-
- def _add_kuryrnetpolicy_crd(self, policy, project_id, sg_id, i_rules,
- e_rules):
+ def _create_knp_crd(self, policy, i_rules, e_rules):
networkpolicy_name = policy['metadata']['name']
- netpolicy_crd_name = "np-" + networkpolicy_name
namespace = policy['metadata']['namespace']
pod_selector = policy['spec'].get('podSelector')
+ policy_types = policy['spec'].get('policyTypes', [])
netpolicy_crd = {
'apiVersion': 'openstack.org/v1',
- 'kind': constants.K8S_OBJ_KURYRNETPOLICY,
+ 'kind': constants.K8S_OBJ_KURYRNETWORKPOLICY,
'metadata': {
- 'name': netpolicy_crd_name,
+ 'name': networkpolicy_name,
'namespace': namespace,
'annotations': {
- 'networkpolicy_name': networkpolicy_name,
- 'networkpolicy_namespace': namespace,
- 'networkpolicy_uid': policy['metadata']['uid'],
+ 'networkPolicyLink': policy['metadata']['selfLink'],
},
+ 'finalizers': [constants.NETWORKPOLICY_FINALIZER],
},
'spec': {
- 'securityGroupName': "sg-" + networkpolicy_name,
- 'securityGroupId': sg_id,
'ingressSgRules': i_rules,
'egressSgRules': e_rules,
'podSelector': pod_selector,
- 'networkpolicy_spec': policy['spec']
+ 'policyTypes': policy_types,
+ },
+ 'status': {
+ 'securityGroupRules': [],
},
}
try:
- LOG.debug("Creating KuryrNetPolicy CRD %s" % netpolicy_crd)
- kubernetes_post = '{}/{}/kuryrnetpolicies'.format(
+ LOG.debug("Creating KuryrNetworkPolicy CRD %s" % netpolicy_crd)
+ url = '{}/{}/kuryrnetworkpolicies'.format(
constants.K8S_API_CRD_NAMESPACES,
namespace)
- self.kubernetes.post(kubernetes_post, netpolicy_crd)
+ netpolicy_crd = self.kubernetes.post(url, netpolicy_crd)
except exceptions.K8sClientException:
- LOG.exception("Kubernetes Client Exception creating kuryrnetpolicy"
- " CRD. %s" % exceptions.K8sClientException)
+ LOG.exception("Kubernetes Client Exception creating "
+ "KuryrNetworkPolicy CRD.")
raise
return netpolicy_crd
- def _del_kuryrnetpolicy_crd(self, netpolicy_crd_name,
- netpolicy_crd_namespace):
+ def _patch_knp_crd(self, policy, i_rules, e_rules, knp):
+ networkpolicy_name = policy['metadata']['name']
+ namespace = policy['metadata']['namespace']
+ pod_selector = policy['spec'].get('podSelector')
+ url = (f'{constants.K8S_API_CRD_NAMESPACES}/{namespace}'
+ f'/kuryrnetworkpolicies/{networkpolicy_name}')
+
+ # FIXME(dulek): Rules should be hashable objects, not dict so that
+ # we could compare them easily here.
+ data = {
+ 'ingressSgRules': i_rules,
+ 'egressSgRules': e_rules,
+ }
+ if knp['spec'].get('podSelector') != pod_selector:
+ data['podSelector'] = pod_selector
+
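+ # NOTE: patching the spec wakes up the KuryrNetworkPolicy handler,
+ # which reconciles the Neutron SG rules with the new definitions.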
+ self.kubernetes.patch_crd('spec', url, data)
+
+ def _del_knp_crd(self, policy):
try:
- LOG.debug("Deleting KuryrNetPolicy CRD %s" % netpolicy_crd_name)
- self.kubernetes.delete('{}/{}/kuryrnetpolicies/{}'.format(
- constants.K8S_API_CRD_NAMESPACES,
- netpolicy_crd_namespace,
- netpolicy_crd_name))
+ ns = policy['metadata']['namespace']
+ name = policy['metadata']['name']
+ LOG.debug("Deleting KuryrNetworkPolicy CRD %s" % name)
+ self.kubernetes.delete('{}/{}/kuryrnetworkpolicies/{}'.format(
+ constants.K8S_API_CRD_NAMESPACES, ns, name))
+ return True
except exceptions.K8sResourceNotFound:
- LOG.debug("KuryrNetPolicy CRD Object not found: %s",
- netpolicy_crd_name)
+ LOG.debug("KuryrNetworkPolicy CRD Object not found: %s", name)
+ return False
except exceptions.K8sClientException:
- LOG.exception("Kubernetes Client Exception deleting kuryrnetpolicy"
- " CRD.")
+ LOG.exception("Kubernetes Client Exception deleting "
+ "KuryrNetworkPolicy CRD %s." % name)
raise
def affected_pods(self, policy, selector=None):
- if selector or selector == {}:
+ if selector is not None:
pod_selector = selector
else:
pod_selector = policy['spec'].get('podSelector')
diff --git a/kuryr_kubernetes/controller/drivers/network_policy_security_groups.py b/kuryr_kubernetes/controller/drivers/network_policy_security_groups.py
index 1d87ef973..46f767e95 100644
--- a/kuryr_kubernetes/controller/drivers/network_policy_security_groups.py
+++ b/kuryr_kubernetes/controller/drivers/network_policy_security_groups.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import uuid
+
from oslo_config import cfg
from oslo_log import log as logging
@@ -21,6 +23,7 @@
from kuryr_kubernetes.controller.drivers import base
from kuryr_kubernetes.controller.drivers import utils as driver_utils
from kuryr_kubernetes import exceptions
+from kuryr_kubernetes import utils
LOG = logging.getLogger(__name__)
@@ -29,9 +32,7 @@ def _get_namespace_labels(namespace):
kubernetes = clients.get_kubernetes_client()
try:
- path = '{}/{}'.format(
- constants.K8S_API_NAMESPACES, namespace)
- LOG.debug("K8s API Query %s", path)
+ path = '{}/{}'.format(constants.K8S_API_NAMESPACES, namespace)
namespaces = kubernetes.get(path)
LOG.debug("Return Namespace: %s", namespaces)
except exceptions.K8sResourceNotFound:
@@ -43,107 +44,41 @@ def _get_namespace_labels(namespace):
return namespaces['metadata'].get('labels')
-def _create_sg_rule(sg_id, direction, cidr, port=None, namespace=None):
- if port:
- sg_rule = driver_utils.create_security_group_rule_body(
- sg_id, direction, port.get('port'),
- protocol=port.get('protocol'), cidr=cidr, namespace=namespace)
- else:
- sg_rule = driver_utils.create_security_group_rule_body(
- sg_id, direction, port_range_min=1,
- port_range_max=65535, cidr=cidr, namespace=namespace)
-
- sgr_id = driver_utils.create_security_group_rule(sg_rule)
+def _bump_networkpolicy(knp):
+ kubernetes = clients.get_kubernetes_client()
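+ # NOTE: annotating the parent NetworkPolicy with a random UUID forces
+ # the policy handler to re-process it and recalculate its SG rules.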
- sg_rule['security_group_rule']['id'] = sgr_id
- return sg_rule
+ try:
+ kubernetes.annotate(
+ knp['metadata']['annotations']['networkPolicyLink'],
+ {constants.K8S_ANNOTATION_POLICY: str(uuid.uuid4())})
+ except exceptions.K8sResourceNotFound:
+ LOG.exception("NetworkPolicy not found")
+ raise
+ except exceptions.K8sClientException:
+ LOG.exception("Kubernetes Client Exception")
+ raise
-def _get_crd_rule(crd_rules, container_port):
- """Returns a CRD rule that matches a container port
+def _create_sg_rules_with_container_ports(container_ports, matched):
+ """Checks if security group rules based on container ports will be updated
- Retrieves the CRD rule that contains a given port in
- the range of the rule ports.
- """
- for crd_rule in crd_rules:
- remote_ip_prefixes = crd_rule.get('remote_ip_prefixes')
- min_port = crd_rule['security_group_rule'].get('port_range_min')
- max_port = crd_rule['security_group_rule'].get('port_range_max')
- if (remote_ip_prefixes and (
- min_port >= container_port and
- container_port <= max_port)):
- return crd_rule
-
-
-def _create_sg_rules_with_container_ports(matched_pods, container_ports,
- allow_all, namespace, matched,
- crd_rules, sg_id, direction,
- port, rule_selected_pod):
- """Create security group rules based on container ports
-
- If it's an allow from/to everywhere rule or a rule with a
- NamespaceSelector, updates a sg rule that might already exist
- and match the named port or creates a new one with the
- remote_ip_prefixes field containing the matched pod info.
- Otherwise, creates rules for each container port without
- a remote_ip_prefixes field.
-
- param matched_pods: List of dicts where the key is a container
- port and value is the pods that have the port
param container_ports: List of tuples with pods and port values
- param allow_all: True is it's an allow from/to everywhere rule,
- False otherwise.
- param namespace: Namespace name
param matched: If a sg rule was created for the NP rule
- param crd_rules: List of sg rules to update when patching the CRD
- param sg_id: ID of the security group
- param direction: String representing rule direction, ingress or egress
- param port: Dict containing port and protocol
- param rule_selected_pod: K8s Pod object selected by the rules selectors
- return: True if a sg rule was created, False otherwise.
+ return: True if a sg rule needs to be created, False otherwise.
"""
for pod, container_port in container_ports:
- pod_namespace = pod['metadata']['namespace']
pod_ip = driver_utils.get_pod_ip(pod)
if not pod_ip:
LOG.debug("Skipping SG rule creation for pod %s due to "
"no IP assigned", pod['metadata']['name'])
continue
+ return True
+ return matched
- pod_info = {pod_ip: pod_namespace}
- matched = True
- if allow_all or namespace:
- crd_rule = _get_crd_rule(crd_rules, container_port)
- if crd_rule:
- crd_rule['remote_ip_prefixes'].update(pod_info)
- else:
- if container_port in matched_pods:
- matched_pods[container_port].update(pod_info)
- else:
- matched_pods[container_port] = pod_info
- else:
- pod_ip = driver_utils.get_pod_ip(rule_selected_pod)
- if not pod_ip:
- LOG.debug("Skipping SG rule creation for pod %s due to no IP "
- "assigned", rule_selected_pod['metadata']['name'])
- continue
- sg_rule = driver_utils.create_security_group_rule_body(
- sg_id, direction, container_port,
- protocol=port.get('protocol'),
- cidr=pod_ip, pods=pod_info)
- sgr_id = driver_utils.create_security_group_rule(sg_rule)
- sg_rule['security_group_rule']['id'] = sgr_id
- if sg_rule not in crd_rules:
- crd_rules.append(sg_rule)
- return matched
-
-
-def _create_sg_rule_on_text_port(sg_id, direction, port, rule_selected_pods,
- crd_rules, matched, crd,
- allow_all=False, namespace=None):
- matched_pods = {}
+def _create_sg_rule_on_text_port(direction, port, rule_selected_pods, matched,
+ crd):
spec_pod_selector = crd['spec'].get('podSelector')
policy_namespace = crd['metadata']['namespace']
spec_pods = driver_utils.get_pods(
@@ -151,11 +86,8 @@ def _create_sg_rule_on_text_port(sg_id, direction, port, rule_selected_pods,
if direction == 'ingress':
for spec_pod in spec_pods:
container_ports = driver_utils.get_ports(spec_pod, port)
- for rule_selected_pod in rule_selected_pods:
- matched = _create_sg_rules_with_container_ports(
- matched_pods, container_ports, allow_all, namespace,
- matched, crd_rules, sg_id, direction, port,
- rule_selected_pod)
+ matched = _create_sg_rules_with_container_ports(
+ container_ports, matched)
elif direction == 'egress':
for rule_selected_pod in rule_selected_pods:
pod_label = rule_selected_pod['metadata'].get('labels')
@@ -168,51 +100,11 @@ def _create_sg_rule_on_text_port(sg_id, direction, port, rule_selected_pods,
container_ports = driver_utils.get_ports(
rule_selected_pod, port)
matched = _create_sg_rules_with_container_ports(
- matched_pods, container_ports, allow_all,
- namespace, matched, crd_rules, sg_id, direction,
- port, rule_selected_pod)
-
- _apply_sg_rules_on_matched_pods(matched_pods, sg_id, direction, namespace,
- port, crd_rules, allow_all)
-
+ container_ports, matched)
return matched
-def _apply_sg_rules_on_matched_pods(matched_pods, sg_id, direction, namespace,
- port, crd_rules, allow_all=False):
- for container_port, pods in matched_pods.items():
- if allow_all:
- for ethertype in (constants.IPv4, constants.IPv6):
- sg_rule = driver_utils.create_security_group_rule_body(
- sg_id, direction, container_port,
- protocol=port.get('protocol'),
- ethertype=ethertype,
- pods=pods)
- sgr_id = driver_utils.create_security_group_rule(sg_rule)
- sg_rule['security_group_rule']['id'] = sgr_id
- if sg_rule not in crd_rules:
- crd_rules.append(sg_rule)
- else:
- namespace_obj = driver_utils.get_namespace(namespace)
- if not namespace_obj:
- LOG.debug("Skipping SG rule creation. Inexistent"
- " namespace.")
- continue
- namespace_cidr = driver_utils.get_namespace_subnet_cidr(
- namespace_obj)
- sg_rule = driver_utils.create_security_group_rule_body(
- sg_id, direction, container_port,
- protocol=port.get('protocol'), cidr=namespace_cidr,
- pods=pods)
- sgr_id = driver_utils.create_security_group_rule(sg_rule)
- sg_rule['security_group_rule']['id'] = sgr_id
- if sg_rule not in crd_rules:
- crd_rules.append(sg_rule)
-
-
-def _create_sg_rules(crd, pod, pod_selector, rule_block,
- crd_rules, direction, matched, namespace=None,
- allow_all=False):
+def _create_sg_rules(crd, pod, pod_selector, rule_block, direction, matched):
pod_labels = pod['metadata'].get('labels')
pod_ip = driver_utils.get_pod_ip(pod)
if not pod_ip:
@@ -224,73 +116,52 @@ def _create_sg_rules(crd, pod, pod_selector, rule_block,
# with empty value or with '{}', as they have same result in here.
if pod_selector:
if driver_utils.match_selector(pod_selector, pod_labels):
- sg_id = crd['spec']['securityGroupId']
if 'ports' in rule_block:
for port in rule_block['ports']:
if type(port.get('port')) is not int:
matched = _create_sg_rule_on_text_port(
- sg_id, direction, port, [pod],
- crd_rules, matched, crd)
+ direction, port, [pod], matched, crd)
else:
matched = True
- sg_rule = _create_sg_rule(
- sg_id, direction, cidr=pod_ip, port=port,
- namespace=namespace)
- if sg_rule not in crd_rules:
- crd_rules.append(sg_rule)
else:
matched = True
- sg_rule = _create_sg_rule(
- sg_id, direction, cidr=pod_ip, namespace=namespace)
- if sg_rule not in crd_rules:
- crd_rules.append(sg_rule)
else:
# NOTE (maysams) When a policy with namespaceSelector and text port
# is applied the port on the pods needs to be retrieved.
- sg_id = crd['spec']['securityGroupId']
if 'ports' in rule_block:
for port in rule_block['ports']:
if type(port.get('port')) is not int:
- matched = (
- _create_sg_rule_on_text_port(
- sg_id, direction, port, [pod],
- crd_rules, matched, crd,
- allow_all=allow_all, namespace=namespace))
+ matched = _create_sg_rule_on_text_port(
+ direction, port, [pod], matched, crd)
return matched
def _parse_selectors_on_pod(crd, pod, pod_selector, namespace_selector,
- rule_block, crd_rules, direction, matched):
+ rule_block, direction, matched):
pod_namespace = pod['metadata']['namespace']
pod_namespace_labels = _get_namespace_labels(pod_namespace)
policy_namespace = crd['metadata']['namespace']
if namespace_selector == {}:
matched = _create_sg_rules(crd, pod, pod_selector, rule_block,
- crd_rules, direction, matched,
- allow_all=True)
+ direction, matched)
elif namespace_selector:
if (pod_namespace_labels and
driver_utils.match_selector(namespace_selector,
pod_namespace_labels)):
matched = _create_sg_rules(crd, pod, pod_selector,
- rule_block, crd_rules,
- direction, matched,
- namespace=pod_namespace)
+ rule_block, direction, matched)
else:
if pod_namespace == policy_namespace:
matched = _create_sg_rules(crd, pod, pod_selector, rule_block,
- crd_rules, direction, matched,
- namespace=pod_namespace)
- return matched, crd_rules
+ direction, matched)
+ return matched
def _parse_selectors_on_namespace(crd, direction, pod_selector,
- ns_selector, rule_block, crd_rules,
- namespace, matched):
+ ns_selector, rule_block, namespace, matched):
ns_name = namespace['metadata'].get('name')
ns_labels = namespace['metadata'].get('labels')
- sg_id = crd['spec']['securityGroupId']
if (ns_selector and ns_labels and
driver_utils.match_selector(ns_selector, ns_labels)):
@@ -301,10 +172,8 @@ def _parse_selectors_on_namespace(crd, direction, pod_selector,
if type(port.get('port')) is not int:
matched = (
_create_sg_rule_on_text_port(
- sg_id, direction, port, pods,
- crd_rules, matched, crd))
+ direction, port, pods, matched, crd))
else:
- matched = True
for pod in pods:
pod_ip = driver_utils.get_pod_ip(pod)
if not pod_ip:
@@ -312,11 +181,7 @@ def _parse_selectors_on_namespace(crd, direction, pod_selector,
LOG.debug("Skipping SG rule creation for pod "
"%s due to no IP assigned", pod_name)
continue
- sg_rule = _create_sg_rule(
- sg_id, direction, pod_ip, port=port,
- namespace=ns_name)
- if sg_rule not in crd_rules:
- crd_rules.append(sg_rule)
+ matched = True
else:
for pod in pods:
pod_ip = driver_utils.get_pod_ip(pod)
@@ -326,45 +191,25 @@ def _parse_selectors_on_namespace(crd, direction, pod_selector,
" to no IP assigned", pod_name)
continue
matched = True
- sg_rule = _create_sg_rule(
- sg_id, direction, pod_ip,
- namespace=ns_name)
- if sg_rule not in crd_rules:
- crd_rules.append(sg_rule)
else:
ns_pods = driver_utils.get_pods(ns_selector)['items']
- ns_cidr = driver_utils.get_namespace_subnet_cidr(namespace)
if 'ports' in rule_block:
for port in rule_block['ports']:
if type(port.get('port')) is not int:
matched = (
_create_sg_rule_on_text_port(
- sg_id, direction, port, ns_pods,
- crd_rules, matched, crd))
+ direction, port, ns_pods, matched, crd))
else:
matched = True
- sg_rule = _create_sg_rule(
- sg_id, direction, ns_cidr,
- port=port, namespace=ns_name)
- if sg_rule not in crd_rules:
- crd_rules.append(sg_rule)
else:
matched = True
- sg_rule = _create_sg_rule(
- sg_id, direction, ns_cidr,
- namespace=ns_name)
- if sg_rule not in crd_rules:
- crd_rules.append(sg_rule)
- return matched, crd_rules
+ return matched
-def _parse_rules(direction, crd, pod=None, namespace=None):
- policy = crd['spec']['networkpolicy_spec']
+def _parse_rules(direction, crd, policy, pod=None, namespace=None):
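+ # NOTE: the NetworkPolicy spec is now passed in explicitly (matched to
+ # the KNP via zip_knp_np()) instead of being read from the CRD itself.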
rule_direction = 'from'
- crd_rules = crd['spec'].get('ingressSgRules')
if direction == 'egress':
rule_direction = 'to'
- crd_rules = crd['spec'].get('egressSgRules')
matched = False
rule_list = policy.get(direction, [])
@@ -373,13 +218,13 @@ def _parse_rules(direction, crd, pod=None, namespace=None):
namespace_selector = rule.get('namespaceSelector')
pod_selector = rule.get('podSelector')
if pod:
- matched, crd_rules = _parse_selectors_on_pod(
+ matched = _parse_selectors_on_pod(
crd, pod, pod_selector, namespace_selector,
- rule_block, crd_rules, direction, matched)
+ rule_block, direction, matched)
elif namespace:
- matched, crd_rules = _parse_selectors_on_namespace(
+ matched = _parse_selectors_on_namespace(
crd, direction, pod_selector, namespace_selector,
- rule_block, crd_rules, namespace, matched)
+ rule_block, namespace, matched)
# NOTE(maysams): Cover the case of a network policy that allows
# from everywhere on a named port, e.g., when there is no 'from'
@@ -387,84 +232,62 @@ def _parse_rules(direction, crd, pod=None, namespace=None):
if pod and not matched:
for port in rule_block.get('ports', []):
if type(port.get('port')) is not int:
- sg_id = crd['spec']['securityGroupId']
if (not rule_block.get(rule_direction, [])
or direction == "ingress"):
- matched = (_create_sg_rule_on_text_port(
- sg_id, direction, port, [pod],
- crd_rules, matched, crd,
- allow_all=True))
- return matched, crd_rules
+ matched = _create_sg_rule_on_text_port(
+ direction, port, [pod], matched, crd)
+ return matched
def _parse_rules_on_delete_namespace(rule_list, direction, ns_name):
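+ # NOTE: no Neutron rules are deleted here anymore; returning True only
+ # signals that the NetworkPolicy has to be bumped for recalculation.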
- matched = False
- rules = []
for rule in rule_list:
- LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction,
- 'r': rule})
+ LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction, 'r': rule})
rule_namespace = rule.get('namespace', None)
- remote_ip_prefixes = rule.get('remote_ip_prefixes', {})
+ affectedPods = rule.get('affectedPods', [])
if rule_namespace and rule_namespace == ns_name:
- matched = True
- driver_utils.delete_security_group_rule(
- rule['security_group_rule']['id'])
- elif remote_ip_prefixes:
- for remote_ip, namespace in list(remote_ip_prefixes.items()):
- if namespace == ns_name:
- matched = True
- remote_ip_prefixes.pop(remote_ip)
- if remote_ip_prefixes:
- rule['remote_ip_prefixes'] = remote_ip_prefixes
- rules.append(rule)
- else:
- rules.append(rule)
- return matched, rules
+ return True
+ elif affectedPods:
+ for pod_info in affectedPods:
+ if pod_info['podNamespace'] == ns_name:
+ return True
+ return False
def _parse_rules_on_delete_pod(rule_list, direction, pod_ip):
- matched = False
- rules = []
for rule in rule_list:
- LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction,
- 'r': rule})
- remote_ip_prefix = rule['security_group_rule'].get(
- 'remote_ip_prefix')
- remote_ip_prefixes = rule.get('remote_ip_prefixes', {})
+ LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction, 'r': rule})
+ remote_ip_prefix = rule['sgRule'].get('remote_ip_prefix')
+ affectedPods = rule.get('affectedPods', [])
if remote_ip_prefix and remote_ip_prefix == pod_ip:
- matched = True
- driver_utils.delete_security_group_rule(
- rule['security_group_rule']['id'])
- elif remote_ip_prefixes:
- if pod_ip in remote_ip_prefixes:
- matched = True
- remote_ip_prefixes.pop(pod_ip)
- if remote_ip_prefixes:
- rule['remote_ip_prefixes'] = remote_ip_prefixes
- rules.append(rule)
- else:
- rules.append(rule)
- return matched, rules
+ return True
+ elif affectedPods:
+ for pod_info in affectedPods:
+ if pod_info['podIP'] == pod_ip:
+ return True
+ return False
-def _get_pod_sgs(pod, project_id):
+def _get_pod_sgs(pod):
sg_list = []
pod_labels = pod['metadata'].get('labels')
pod_namespace = pod['metadata']['namespace']
- knp_crds = driver_utils.get_kuryrnetpolicy_crds(
+ knp_crds = driver_utils.get_kuryrnetworkpolicy_crds(
namespace=pod_namespace)
- for crd in knp_crds.get('items'):
+ for crd in knp_crds:
pod_selector = crd['spec'].get('podSelector')
- if pod_selector:
- if driver_utils.match_selector(pod_selector, pod_labels):
- LOG.debug("Appending %s",
- str(crd['spec']['securityGroupId']))
- sg_list.append(str(crd['spec']['securityGroupId']))
- else:
- LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
- sg_list.append(str(crd['spec']['securityGroupId']))
+ if driver_utils.match_selector(pod_selector, pod_labels):
+ sg_id = crd['status'].get('securityGroupId')
+ if not sg_id:
+ # NOTE(dulek): We could just assume KNP handler will apply it,
+ # but it's possible that when it gets this pod it
+ # will have no IP yet and will be skipped.
+ LOG.warning('SG for NP %s not created yet, will retry.',
+ utils.get_res_unique_name(crd))
+ raise exceptions.ResourceNotReady(pod)
+ LOG.debug("Appending %s", crd['status']['securityGroupId'])
+ sg_list.append(crd['status']['securityGroupId'])
# NOTE(maysams) Pods that are not selected by any Networkpolicy
# are fully accessible. Thus, the default security group is associated.
@@ -481,55 +304,56 @@ class NetworkPolicySecurityGroupsDriver(base.PodSecurityGroupsDriver):
"""Provides security groups for pods based on network policies"""
def get_security_groups(self, pod, project_id):
- return _get_pod_sgs(pod, project_id)
+ return _get_pod_sgs(pod)
def create_sg_rules(self, pod):
- LOG.debug("Creating sg rule for pod: %s", pod['metadata']['name'])
+ LOG.debug("Creating SG rules for pod: %s", pod['metadata']['name'])
crd_pod_selectors = []
- knp_crds = driver_utils.get_kuryrnetpolicy_crds()
- for crd in knp_crds.get('items'):
+ knp_crds = driver_utils.get_kuryrnetworkpolicy_crds()
+ nps = driver_utils.get_networkpolicies()
+ pairs = driver_utils.zip_knp_np(knp_crds, nps)
+
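+ # NOTE: zip_knp_np() pairs each KuryrNetworkPolicy with its parent
+ # NetworkPolicy, since rules are now parsed from the live NP spec.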
+ for crd, policy in pairs:
crd_selector = crd['spec'].get('podSelector')
+ spec = policy.get('spec')
- i_matched, i_rules = _parse_rules('ingress', crd, pod=pod)
- e_matched, e_rules = _parse_rules('egress', crd, pod=pod)
+ i_matched = _parse_rules('ingress', crd, spec, pod=pod)
+ e_matched = _parse_rules('egress', crd, spec, pod=pod)
if i_matched or e_matched:
- driver_utils.patch_kuryrnetworkpolicy_crd(crd, i_rules,
- e_rules,
- crd_selector)
+ _bump_networkpolicy(crd)
if i_matched:
crd_pod_selectors.append(crd_selector)
return crd_pod_selectors
def delete_sg_rules(self, pod):
- LOG.debug("Deleting sg rule for pod: %s", pod['metadata']['name'])
+ LOG.debug("Deleting SG rules for pod: %s", pod['metadata']['name'])
pod_ip = driver_utils.get_pod_ip(pod)
+ crd_pod_selectors = []
if not pod_ip:
LOG.debug("Skipping SG rule deletion as pod %s has no IP assigned",
pod['metadata']['name'])
- return None
- crd_pod_selectors = []
- knp_crds = driver_utils.get_kuryrnetpolicy_crds()
- for crd in knp_crds.get('items'):
+ return crd_pod_selectors
+ knp_crds = driver_utils.get_kuryrnetworkpolicy_crds()
+ for crd in knp_crds:
crd_selector = crd['spec'].get('podSelector')
ingress_rule_list = crd['spec'].get('ingressSgRules')
egress_rule_list = crd['spec'].get('egressSgRules')
- i_matched, i_rules = _parse_rules_on_delete_pod(
+ i_matched = _parse_rules_on_delete_pod(
ingress_rule_list, "ingress", pod_ip)
- e_matched, e_rules = _parse_rules_on_delete_pod(
+ e_matched = _parse_rules_on_delete_pod(
egress_rule_list, "egress", pod_ip)
if i_matched or e_matched:
- driver_utils.patch_kuryrnetworkpolicy_crd(crd, i_rules,
- e_rules,
- crd_selector)
+ _bump_networkpolicy(crd)
if i_matched:
crd_pod_selectors.append(crd_selector)
return crd_pod_selectors
def update_sg_rules(self, pod):
- LOG.debug("Updating sg rule for pod: %s", pod['metadata']['name'])
+ LOG.debug("Updating SG rules for pod: %s", pod['metadata']['name'])
+ # FIXME(dulek): No need to bump twice.
crd_pod_selectors = []
crd_pod_selectors.extend(self.delete_sg_rules(pod))
crd_pod_selectors.extend(self.create_sg_rules(pod))
@@ -537,51 +361,47 @@ def update_sg_rules(self, pod):
def delete_namespace_sg_rules(self, namespace):
ns_name = namespace['metadata']['name']
- LOG.debug("Deleting sg rule for namespace: %s",
- ns_name)
+ LOG.debug("Deleting SG rules for namespace: %s", ns_name)
crd_selectors = []
- knp_crds = driver_utils.get_kuryrnetpolicy_crds()
- for crd in knp_crds.get('items'):
+ knp_crds = driver_utils.get_kuryrnetworkpolicy_crds()
+ for crd in knp_crds:
crd_selector = crd['spec'].get('podSelector')
ingress_rule_list = crd['spec'].get('ingressSgRules')
egress_rule_list = crd['spec'].get('egressSgRules')
- i_matched, i_rules = _parse_rules_on_delete_namespace(
+ i_matched = _parse_rules_on_delete_namespace(
ingress_rule_list, "ingress", ns_name)
- e_matched, e_rules = _parse_rules_on_delete_namespace(
+ e_matched = _parse_rules_on_delete_namespace(
egress_rule_list, "egress", ns_name)
if i_matched or e_matched:
- driver_utils.patch_kuryrnetworkpolicy_crd(
- crd, i_rules, e_rules, crd_selector)
+ _bump_networkpolicy(crd)
if i_matched:
crd_selectors.append(crd_selector)
return crd_selectors
def create_namespace_sg_rules(self, namespace):
ns_name = namespace['metadata']['name']
- LOG.debug("Creating sg rule for namespace: %s", ns_name)
+ LOG.debug("Creating SG rules for namespace: %s", ns_name)
crd_selectors = []
- knp_crds = driver_utils.get_kuryrnetpolicy_crds()
- for crd in knp_crds.get('items'):
+ knp_crds = driver_utils.get_kuryrnetworkpolicy_crds()
+ nps = driver_utils.get_networkpolicies()
+ pairs = driver_utils.zip_knp_np(knp_crds, nps)
+ for crd, policy in pairs:
crd_selector = crd['spec'].get('podSelector')
-
- i_matched, i_rules = _parse_rules(
- 'ingress', crd, namespace=namespace)
- e_matched, e_rules = _parse_rules(
- 'egress', crd, namespace=namespace)
+ spec = policy.get('spec')
+ i_matched = _parse_rules('ingress', crd, spec, namespace=namespace)
+ e_matched = _parse_rules('egress', crd, spec, namespace=namespace)
if i_matched or e_matched:
- driver_utils.patch_kuryrnetworkpolicy_crd(crd, i_rules,
- e_rules,
- crd_selector)
+ _bump_networkpolicy(crd)
if i_matched:
crd_selectors.append(crd_selector)
return crd_selectors
def update_namespace_sg_rules(self, namespace):
- LOG.debug("Updating sg rule for namespace: %s",
+ LOG.debug("Updating SG rules for namespace: %s",
namespace['metadata']['name'])
crd_selectors = []
crd_selectors.extend(self.delete_namespace_sg_rules(namespace))
@@ -608,5 +428,5 @@ def get_security_groups(self, service, project_id):
# all of them. Hence only considering the security groups applied
# to the first one.
if pods:
- return _get_pod_sgs(pods[0], project_id)
+ return _get_pod_sgs(pods[0])
return sg_list[:]
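
The NOTE above `_get_pod_sgs()` explains the fallback this driver relies on: a pod selected by no NetworkPolicy must stay fully accessible, so the configured default security groups are applied. A minimal, hypothetical sketch of that decision, assuming the `neutron_defaults.pod_security_groups` option referenced elsewhere in this patch:

```python
# Hypothetical sketch, not the driver's actual code: a pod matched by
# no NetworkPolicy falls back to the configured default SGs.
from oslo_config import cfg  # assumes kuryr's options are registered

def effective_pod_sgs(policy_sg_ids):
    if policy_sg_ids:
        # SGs created for the NetworkPolicies selecting the pod.
        return policy_sg_ids
    return list(cfg.CONF.neutron_defaults.pod_security_groups)
```
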
diff --git a/kuryr_kubernetes/controller/drivers/neutron_vif.py b/kuryr_kubernetes/controller/drivers/neutron_vif.py
index 2ffd352f5..ca94d57da 100644
--- a/kuryr_kubernetes/controller/drivers/neutron_vif.py
+++ b/kuryr_kubernetes/controller/drivers/neutron_vif.py
@@ -95,10 +95,10 @@ def activate_vif(self, vif):
def update_vif_sgs(self, pod, security_groups):
os_net = clients.get_network_client()
- pod_state = utils.get_pod_state(pod)
- if pod_state:
+ vifs = utils.get_vifs(pod)
+ if vifs:
# NOTE(ltomasbo): It just updates the default_vif security group
- port_id = pod_state.vifs[constants.DEFAULT_IFNAME].id
+ port_id = vifs[constants.DEFAULT_IFNAME].id
os_net.update_port(port_id, security_groups=list(security_groups))
def _get_port_request(self, pod, project_id, subnets, security_groups,
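
With VIF data moved from pod annotations to the KuryrPort CRD, callers such as `update_vif_sgs()` above resolve the default port in two steps. A usage sketch built only from helpers visible in this patch (error handling omitted):

```python
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import utils as driver_utils

def default_port_id(pod):
    """Neutron port ID of the pod's default VIF, or None if absent."""
    vifs = driver_utils.get_vifs(pod)  # {} when no KuryrPort exists yet
    vif = vifs.get(constants.DEFAULT_IFNAME)
    return vif.id if vif else None
```
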
diff --git a/kuryr_kubernetes/controller/drivers/utils.py b/kuryr_kubernetes/controller/drivers/utils.py
index abeaf8ebe..093b5e514 100644
--- a/kuryr_kubernetes/controller/drivers/utils.py
+++ b/kuryr_kubernetes/controller/drivers/utils.py
@@ -17,6 +17,7 @@
import netaddr
from openstack import exceptions as os_exc
+from os_vif import objects
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
@@ -59,15 +60,23 @@ def get_host_id(pod):
return pod['spec']['nodeName']
-def get_pod_state(pod):
+def get_kuryrport(pod):
+ k8s = clients.get_kubernetes_client()
try:
- annotations = pod['metadata']['annotations']
- state_annotation = annotations[constants.K8S_ANNOTATION_VIF]
- except KeyError:
+ return k8s.get(f'{constants.K8S_API_CRD_NAMESPACES}/'
+ f'{pod["metadata"]["namespace"]}/kuryrports/'
+ f'{pod["metadata"]["name"]}')
+ except k_exc.K8sResourceNotFound:
return None
- state_annotation = jsonutils.loads(state_annotation)
- state = utils.extract_pod_annotation(state_annotation)
- return state
+
+
+def get_vifs(pod):
+ kp = get_kuryrport(pod)
+ try:
+ return {k: objects.base.VersionedObject.obj_from_primitive(v['vif'])
+ for k, v in kp['spec']['vifs'].items()}
+ except (KeyError, AttributeError, TypeError):
+ return {}
def is_host_network(pod):
@@ -174,10 +183,8 @@ def replace_encoded_characters(labels):
def create_security_group_rule(body):
os_net = clients.get_network_client()
- sgr = ''
-
try:
- params = dict(body['security_group_rule'])
+ params = dict(body)
if 'ethertype' in params:
# NOTE(gryf): in openstacksdk, there is ether_type attribute in
# the security_group_rule object, in CRD we have 'ethertype'
@@ -212,29 +219,27 @@ def delete_security_group_rule(security_group_rule_id):
raise
-def patch_kuryrnetworkpolicy_crd(crd, i_rules, e_rules, pod_selector,
- np_spec=None):
+def patch_kuryrnetworkpolicy_crd(crd, i_rules, e_rules):
kubernetes = clients.get_kubernetes_client()
crd_name = crd['metadata']['name']
- if not np_spec:
- np_spec = crd['spec']['networkpolicy_spec']
- LOG.debug('Patching KuryrNetPolicy CRD %s' % crd_name)
+ LOG.debug('Patching KuryrNetworkPolicy CRD %s', crd_name)
try:
- kubernetes.patch_crd('spec', crd['metadata']['selfLink'],
- {'ingressSgRules': i_rules,
- 'egressSgRules': e_rules,
- 'podSelector': pod_selector,
- 'networkpolicy_spec': np_spec})
+ spec = {
+ 'ingressSgRules': i_rules,
+ 'egressSgRules': e_rules,
+ }
+
+ kubernetes.patch_crd('spec', crd['metadata']['selfLink'], spec)
except k_exc.K8sResourceNotFound:
- LOG.debug('KuryrNetPolicy CRD not found %s', crd_name)
+ LOG.debug('KuryrNetworkPolicy CRD not found %s', crd_name)
except k_exc.K8sClientException:
- LOG.exception('Error updating kuryrnetpolicy CRD %s', crd_name)
+ LOG.exception('Error updating KuryrNetworkPolicy CRD %s', crd_name)
raise
def create_security_group_rule_body(
- security_group_id, direction, port_range_min=None,
- port_range_max=None, protocol=None, ethertype=None, cidr=None,
+ direction, port_range_min=None, port_range_max=None, protocol=None,
+ ethertype='IPv4', cidr=None,
description="Kuryr-Kubernetes NetPolicy SG rule", namespace=None,
pods=None):
if not port_range_min:
@@ -245,15 +250,12 @@ def create_security_group_rule_body(
if not protocol:
protocol = 'TCP'
- if not ethertype:
- ethertype = 'IPv4'
- if cidr and netaddr.IPNetwork(cidr).version == 6:
- ethertype = 'IPv6'
+ if cidr and netaddr.IPNetwork(cidr).version == 6:
+ ethertype = 'IPv6'
security_group_rule_body = {
- 'security_group_rule': {
+ 'sgRule': {
'ethertype': ethertype,
- 'security_group_id': security_group_id,
'description': description,
'direction': direction,
'protocol': protocol.lower(),
@@ -262,31 +264,29 @@ def create_security_group_rule_body(
}
}
if cidr:
- security_group_rule_body['security_group_rule'][
- 'remote_ip_prefix'] = cidr
+ security_group_rule_body['sgRule']['remote_ip_prefix'] = cidr
if namespace:
security_group_rule_body['namespace'] = namespace
if pods:
- security_group_rule_body['remote_ip_prefixes'] = pods
+ security_group_rule_body['affectedPods'] = [
+ {'podIP': ip, 'podNamespace': ns} for ip, ns in pods.items()]
LOG.debug("Creating sg rule body %s", security_group_rule_body)
return security_group_rule_body
def get_pod_ip(pod):
try:
- pod_metadata = pod['metadata']['annotations']
- vif = pod_metadata[constants.K8S_ANNOTATION_VIF]
- except KeyError:
+ kp = get_kuryrport(pod)
+ vif = [x['vif'] for x in kp['spec']['vifs'].values()
+ if x['default']][0]
+ except (KeyError, TypeError, IndexError):
return None
- vif = jsonutils.loads(vif)
- vif = vif['versioned_object.data']['default_vif']
- network = (vif['versioned_object.data']['network']
- ['versioned_object.data'])
- first_subnet = (network['subnets']['versioned_object.data']
- ['objects'][0]['versioned_object.data'])
- first_subnet_ip = (first_subnet['ips']['versioned_object.data']
- ['objects'][0]['versioned_object.data']['address'])
- return first_subnet_ip
+ return (vif['versioned_object.data']['network']
+ ['versioned_object.data']['subnets']
+ ['versioned_object.data']['objects'][0]
+ ['versioned_object.data']['ips']
+ ['versioned_object.data']['objects'][0]
+ ['versioned_object.data']['address'])
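
The chain of lookups in `get_pod_ip()` above is easier to verify against the shape of the primitive it walks: every serialized os_vif VersionedObject nests its fields under a 'versioned_object.data' key. A trimmed, illustrative primitive (the address is made up):

```python
d = 'versioned_object.data'  # key every os_vif primitive nests under
ip_obj = {d: {'address': '10.0.0.5'}}
subnet_obj = {d: {'ips': {d: {'objects': [ip_obj]}}}}
network = {d: {'subnets': {d: {'objects': [subnet_obj]}}}}
vif_primitive = {d: {'network': network}}

# The traversal above resolves to the first IP of the first subnet:
assert (vif_primitive[d]['network'][d]['subnets'][d]['objects'][0]
        [d]['ips'][d]['objects'][0][d]['address']) == '10.0.0.5'
```
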
def get_annotations(resource, annotation):
@@ -304,25 +304,60 @@ def get_annotated_labels(resource, annotation_labels):
return None
-def get_kuryrnetpolicy_crds(namespace=None):
+def get_kuryrnetworkpolicy_crds(namespace=None):
kubernetes = clients.get_kubernetes_client()
try:
if namespace:
- knp_path = '{}/{}/kuryrnetpolicies'.format(
+ knp_path = '{}/{}/kuryrnetworkpolicies'.format(
constants.K8S_API_CRD_NAMESPACES, namespace)
else:
- knp_path = constants.K8S_API_CRD_KURYRNETPOLICIES
- LOG.debug("K8s API Query %s", knp_path)
+ knp_path = constants.K8S_API_CRD_KURYRNETWORKPOLICIES
knps = kubernetes.get(knp_path)
- LOG.debug("Return Kuryr Network Policies with label %s", knps)
+ LOG.debug("Returning KuryrNetworkPolicies %s", knps)
except k_exc.K8sResourceNotFound:
- LOG.exception("KuryrNetPolicy CRD not found")
+ LOG.exception("KuryrNetworkPolicy CRD not found")
raise
except k_exc.K8sClientException:
LOG.exception("Kubernetes Client Exception")
raise
- return knps
+ return knps.get('items', [])
+
+
+def get_networkpolicies(namespace=None):
+ # FIXME(dulek): This is awful, shouldn't we have list method on k8s_client?
+ kubernetes = clients.get_kubernetes_client()
+
+ try:
+ if namespace:
+ np_path = '{}/{}/networkpolicies'.format(
+ constants.K8S_API_CRD_NAMESPACES, namespace)
+ else:
+ np_path = constants.K8S_API_POLICIES
+ nps = kubernetes.get(np_path)
+ except k_exc.K8sResourceNotFound:
+ LOG.exception("NetworkPolicy or namespace %s not found", namespace)
+ raise
+ except k_exc.K8sClientException:
+ LOG.exception("Exception when listing NetworkPolicies.")
+ raise
+ return nps.get('items', [])
+
+
+def zip_knp_np(knps, nps):
+ """Returns tuples of matching KuryrNetworkPolicy and NetworkPolicy objs.
+
+ :param knps: List of KuryrNetworkPolicy objects
+ :param nps: List of NetworkPolicy objects
+ :return: List of tuples of matching (knp, np)
+ """
+ pairs = []
+ for knp in knps:
+ for np in nps:
+ if utils.get_res_unique_name(knp) == utils.get_res_unique_name(np):
+ pairs.append((knp, np))
+ break
+ return pairs
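
`zip_knp_np()` pairs the two object kinds by unique name. A self-contained miniature of the matching, assuming (as the rest of the patch suggests) that `utils.get_res_unique_name()` reduces to "namespace/name":

```python
def unique_name(obj):  # stand-in for utils.get_res_unique_name()
    meta = obj['metadata']
    return f"{meta['namespace']}/{meta['name']}"

knps = [{'metadata': {'namespace': 'ns1', 'name': 'deny-all'}}]
nps = [{'metadata': {'namespace': 'ns1', 'name': 'deny-all'}},
       {'metadata': {'namespace': 'ns2', 'name': 'other'}}]
pairs = [(knp, np) for knp in knps for np in nps
         if unique_name(knp) == unique_name(np)]
assert len(pairs) == 1
```
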
def match_expressions(expressions, labels):
@@ -363,6 +398,8 @@ def match_labels(crd_labels, labels):
def match_selector(selector, labels):
+ if selector is None:
+ return True
crd_labels = selector.get('matchLabels', None)
crd_expressions = selector.get('matchExpressions', None)
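
The new `None` short-circuit in `match_selector()` follows the Kubernetes convention that a policy without a podSelector selects every pod in the namespace. A simplified, runnable sketch of those semantics (matchExpressions left out for brevity):

```python
def match_selector(selector, labels):
    if selector is None:
        return True  # no selector: match everything
    wanted = selector.get('matchLabels') or {}
    labels = labels or {}
    return all(labels.get(k) == v for k, v in wanted.items())

assert match_selector(None, {'app': 'demo'})
assert match_selector({'matchLabels': {'app': 'demo'}}, {'app': 'demo'})
assert not match_selector({'matchLabels': {'app': 'web'}}, {'app': 'demo'})
```
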
diff --git a/kuryr_kubernetes/controller/drivers/vif_pool.py b/kuryr_kubernetes/controller/drivers/vif_pool.py
index 809002a97..8367e1891 100644
--- a/kuryr_kubernetes/controller/drivers/vif_pool.py
+++ b/kuryr_kubernetes/controller/drivers/vif_pool.py
@@ -27,7 +27,6 @@
from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging
from oslo_log import versionutils
-from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
@@ -280,16 +279,9 @@ def _get_in_use_ports(self):
in_use_ports = []
running_pods = kubernetes.get(constants.K8S_API_BASE + '/pods')
for pod in running_pods['items']:
- try:
- annotations = jsonutils.loads(pod['metadata']['annotations'][
- constants.K8S_ANNOTATION_VIF])
- pod_state = utils.extract_pod_annotation(annotations)
- except KeyError:
- LOG.debug("Skipping pod without kuryr VIF annotation: %s",
- pod)
- else:
- for vif in pod_state.vifs.values():
- in_use_ports.append(vif.id)
+ vifs = c_utils.get_vifs(pod)
+ for data in vifs.values():
+ in_use_ports.append(data.id)
return in_use_ports
def list_pools(self):
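
`_get_in_use_ports()` above now derives in-use Neutron ports from KuryrPort CRDs; pods without a KuryrPort contribute nothing because `get_vifs()` returns an empty dict. The aggregation in isolation, with a fake VIF standing in for the os_vif object:

```python
def in_use_port_ids(pods, get_vifs):
    """Collect the Neutron port ID of every VIF of every pod."""
    return [vif.id
            for pod in pods
            for vif in get_vifs(pod).values()]

class FakeVIF:  # stands in for an os_vif VIF object
    def __init__(self, port_id):
        self.id = port_id

pods = [{'name': 'a'}, {'name': 'b'}]
vifs_by_pod = {'a': {'eth0': FakeVIF('port-1')}, 'b': {}}
assert in_use_port_ids(pods, lambda p: vifs_by_pod[p['name']]) == ['port-1']
```
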
diff --git a/kuryr_kubernetes/controller/handlers/kuryrnetpolicy.py b/kuryr_kubernetes/controller/handlers/kuryrnetpolicy.py
deleted file mode 100644
index 8c30f7c18..000000000
--- a/kuryr_kubernetes/controller/handlers/kuryrnetpolicy.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2019 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from kuryr_kubernetes import constants
-from kuryr_kubernetes.controller.drivers import base as drivers
-from kuryr_kubernetes.handlers import k8s_base
-
-
-class KuryrNetPolicyHandler(k8s_base.ResourceEventHandler):
- """Controller side of KuryrNetPolicy process for Kubernetes pods.
-
- `KuryrNetPolicyHandler` runs on the Kuryr-Kubernetes controller and is
- responsible for deleting associated security groups upon namespace
- deletion.
- """
- OBJECT_KIND = constants.K8S_OBJ_KURYRNETPOLICY
- OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRNETPOLICIES
-
- def __init__(self):
- super(KuryrNetPolicyHandler, self).__init__()
- self._drv_policy = drivers.NetworkPolicyDriver.get_instance()
-
- def on_deleted(self, netpolicy_crd):
- crd_sg = netpolicy_crd['spec'].get('securityGroupId')
- if crd_sg:
- self._drv_policy.delete_np_sg(crd_sg)
diff --git a/kuryr_kubernetes/controller/handlers/kuryrnetworkpolicy.py b/kuryr_kubernetes/controller/handlers/kuryrnetworkpolicy.py
new file mode 100644
index 000000000..a93466161
--- /dev/null
+++ b/kuryr_kubernetes/controller/handlers/kuryrnetworkpolicy.py
@@ -0,0 +1,307 @@
+# Copyright 2019 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from openstack import exceptions as os_exc
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from kuryr_kubernetes import clients
+from kuryr_kubernetes import constants
+from kuryr_kubernetes.controller.drivers import base as drivers
+from kuryr_kubernetes.controller.drivers import utils as driver_utils
+from kuryr_kubernetes import exceptions
+from kuryr_kubernetes.handlers import k8s_base
+from kuryr_kubernetes import utils
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+
+class KuryrNetworkPolicyHandler(k8s_base.ResourceEventHandler):
+ """Controller side of KuryrNetworkPolicy process for Kubernetes pods.
+
+ `KuryrNetworkPolicyHandler` runs on the kuryr-controller and is
+ responsible for creating and deleting SG and SG rules for `NetworkPolicy`.
+ The `KuryrNetworkPolicy` objects are created by `NetworkPolicyHandler`.
+ """
+ OBJECT_KIND = constants.K8S_OBJ_KURYRNETWORKPOLICY
+ OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRNETWORKPOLICIES
+
+ def __init__(self):
+ super(KuryrNetworkPolicyHandler, self).__init__()
+ self.os_net = clients.get_network_client()
+ self.k8s = clients.get_kubernetes_client()
+ self._drv_project = drivers.NetworkPolicyProjectDriver.get_instance()
+ self._drv_policy = drivers.NetworkPolicyDriver.get_instance()
+ self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
+ specific_driver='multi_pool')
+ self._drv_vif_pool.set_vif_driver()
+ self._drv_pod_sg = drivers.PodSecurityGroupsDriver.get_instance()
+ self._drv_svc_sg = drivers.ServiceSecurityGroupsDriver.get_instance()
+ self._drv_lbaas = drivers.LBaaSDriver.get_instance()
+
+ self._convert_old_crds()
+
+ def _convert_old_crds(self):
+ try:
+ netpolicies = self.k8s.get(constants.K8S_API_CRD_KURYRNETPOLICIES)
+ except exceptions.K8sClientException:
+ LOG.exception("Error when fetching old KuryrNetPolicy CRDs for "
+ "conversion.")
+ return
+
+ for netpolicy in netpolicies.get('items', []):
+ new_networkpolicy = self._drv_policy.get_from_old_crd(netpolicy)
+ url = (f"{constants.K8S_API_CRD_NAMESPACES}/"
+ f"{netpolicy['metadata']['namespace']}/"
+ f"kuryrnetworkpolicies")
+ try:
+ self.k8s.post(url, new_networkpolicy)
+ except exceptions.K8sConflict:
+ LOG.warning('KuryrNetworkPolicy %s already existed when '
+ 'converting KuryrNetPolicy %s. Ignoring.',
+ utils.get_res_unique_name(new_networkpolicy),
+ utils.get_res_unique_name(netpolicy))
+ self.k8s.delete(netpolicy['metadata']['selfLink'])
+
+ def _patch_kuryrnetworkpolicy_crd(self, knp, field, data,
+ action='replace'):
+ name = knp['metadata']['name']
+ LOG.debug('Patching KuryrNetworkPolicy CRD %s', name)
+ try:
+ status = self.k8s.patch_crd(field, knp['metadata']['selfLink'],
+ data, action=action)
+ except exceptions.K8sResourceNotFound:
+ LOG.debug('KuryrNetworkPolicy CRD not found %s', name)
+ return None
+ except exceptions.K8sClientException:
+ LOG.exception('Error updating KuryrNetworkPolicy CRD %s', name)
+ raise
+
+ knp['status'] = status
+ return knp
+
+ def _get_networkpolicy(self, link):
+ return self.k8s.get(link)
+
+ def _compare_sgs(self, a, b):
+ checked_props = ('direction', 'ethertype', 'port_range_max',
+ 'port_range_min', 'protocol', 'remote_ip_prefix')
+
+ for k in checked_props:
+ if a.get(k) != b.get(k):
+ return False
+ return True
+
+ def _find_sgs(self, a, rules):
+ for r in rules:
+ if self._compare_sgs(r, a):
+ return True
+
+ return False
+
+ def on_present(self, knp):
+ uniq_name = utils.get_res_unique_name(knp)
+ LOG.debug('on_present() for NP %s', uniq_name)
+ project_id = self._drv_project.get_project(knp)
+ if not knp['status'].get('securityGroupId'):
+ LOG.debug('Creating SG for NP %s', uniq_name)
+ # TODO(dulek): Do this right, why do we have a project driver per
+ # resource?! This one expects policy, not knp, but it
+ # ignores it anyway!
+ sg_id = self._drv_policy.create_security_group(knp, project_id)
+ knp = self._patch_kuryrnetworkpolicy_crd(
+ knp, 'status', {'securityGroupId': sg_id})
+ LOG.debug('Created SG %s for NP %s', sg_id, uniq_name)
+ else:
+ # TODO(dulek): Check if it really exists, recreate if not.
+ sg_id = knp['status'].get('securityGroupId')
+
+ # First update SG rules as we want to apply updated ones
+ current = knp['status']['securityGroupRules']
+ required = knp['spec']['ingressSgRules'] + knp['spec']['egressSgRules']
+ required = [r['sgRule'] for r in required]
+
+ # FIXME(dulek): This *might* be prone to race conditions if failure
+ # happens between SG rule is created/deleted and status
+ # is annotated. We don't however need to revert on failed
+ # K8s operations - creation, deletion of SG rules and
+ # attaching or detaching SG from ports are idempotent
+ # so we can repeat them. What worries me is losing track
+ # of an update due to restart. The only way to do it
+ # would be to periodically check if what's in `status`
+ # is the reality in OpenStack API. That should be just
+ # two Neutron API calls + possible resync.
+ to_add = []
+ to_remove = []
+ for r in required:
+ if not self._find_sgs(r, current):
+ to_add.append(r)
+
+ for i, c in enumerate(current):
+ if not self._find_sgs(c, required):
+ to_remove.append((i, c['id']))
+
+ LOG.debug('SG rules to add for NP %s: %s', uniq_name, to_add)
+
+ for sg_rule in to_add:
+ LOG.debug('Adding SG rule %s for NP %s', sg_rule, uniq_name)
+ sg_rule['security_group_id'] = sg_id
+ sgr_id = driver_utils.create_security_group_rule(sg_rule)
+ sg_rule['id'] = sgr_id
+ knp = self._patch_kuryrnetworkpolicy_crd(
+ knp, 'status', {'securityGroupRules/-': sg_rule}, 'add')
+
+ # We need to remove starting from the last one in order to maintain
+ # indexes. Please note this will start to fail miserably if we start
+ # to change status from multiple places.
+ to_remove.reverse()
+
+ LOG.debug('SG rules to remove for NP %s: %s', uniq_name,
+ [x[1] for x in to_remove])
+
+ for i, sg_rule_id in to_remove:
+ LOG.debug('Removing SG rule %s as it is no longer part of NP %s',
+ sg_rule_id, uniq_name)
+ driver_utils.delete_security_group_rule(sg_rule_id)
+ knp = self._patch_kuryrnetworkpolicy_crd(
+ knp, 'status/securityGroupRules', i, 'remove')
+
+ pods_to_update = []
+
+ previous_sel = knp['status'].get('podSelector', None)
+ current_sel = knp['spec']['podSelector']
+ if previous_sel is None:
+ # Fresh NetworkPolicy that was never applied.
+ pods_to_update.extend(self._drv_policy.namespaced_pods(knp))
+ elif previous_sel != current_sel or previous_sel == {}:
+ pods_to_update.extend(
+ self._drv_policy.affected_pods(knp, previous_sel))
+
+ matched_pods = self._drv_policy.affected_pods(knp)
+ pods_to_update.extend(matched_pods)
+
+ for pod in pods_to_update:
+ if driver_utils.is_host_network(pod):
+ continue
+ pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
+ self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
+
+ # FIXME(dulek): We should not need this one day.
+ policy = self._get_networkpolicy(knp['metadata']['annotations']
+ ['networkPolicyLink'])
+ if (pods_to_update and CONF.octavia_defaults.enforce_sg_rules and
+ not self._is_egress_only_policy(policy)):
+ # NOTE(ltomasbo): only need to change services if the pods that
+ # they point to are updated
+ services = driver_utils.get_services(knp['metadata']['namespace'])
+ for service in services.get('items', []):
+ # TODO(ltomasbo): Skip other services that are not affected
+ # by the policy
+ # FIXME(dulek): Make sure to include svcs without selector when
+ # we start supporting them.
+ if (not service['spec'].get('selector') or not
+ self._is_service_affected(service, pods_to_update)):
+ continue
+ sgs = self._drv_svc_sg.get_security_groups(service, project_id)
+ self._drv_lbaas.update_lbaas_sg(service, sgs)
+
+ self._patch_kuryrnetworkpolicy_crd(knp, 'status',
+ {'podSelector': current_sel})
+
+ def _is_service_affected(self, service, affected_pods):
+ svc_namespace = service['metadata']['namespace']
+ svc_selector = service['spec'].get('selector')
+ svc_pods = driver_utils.get_pods({'selector': svc_selector},
+ svc_namespace).get('items')
+ return any(pod in svc_pods for pod in affected_pods)
+
+ def _is_egress_only_policy(self, policy):
+ policy_types = policy['spec'].get('policyTypes', [])
+ return (policy_types == ['Egress'] or
+ (policy['spec'].get('egress') and
+ not policy['spec'].get('ingress')))
+
+ def _get_policy_net_id(self, knp):
+ policy_ns = knp['metadata']['namespace']
+
+ kubernetes = clients.get_kubernetes_client()
+ try:
+ path = (f'{constants.K8S_API_CRD_NAMESPACES}/{policy_ns}/'
+ f'kuryrnetworks/{policy_ns}')
+ net_crd = kubernetes.get(path)
+ except exceptions.K8sClientException:
+ LOG.exception("Kubernetes Client Exception.")
+ raise
+ return net_crd['status']['netId']
+
+ def on_finalize(self, knp):
+ LOG.debug("Finalizing KuryrNetworkPolicy %s")
+ project_id = self._drv_project.get_project(knp)
+ pods_to_update = self._drv_policy.affected_pods(knp)
+ crd_sg = knp['status'].get('securityGroupId')
+ try:
+ policy = self._get_networkpolicy(knp['metadata']['annotations']
+ ['networkPolicyLink'])
+ except exceptions.K8sResourceNotFound:
+ # NP is already gone, let's just try to clean up.
+ policy = None
+
+ if crd_sg:
+ for pod in pods_to_update:
+ if driver_utils.is_host_network(pod):
+ continue
+ pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
+ if crd_sg in pod_sgs:
+ pod_sgs.remove(crd_sg)
+ if not pod_sgs:
+ pod_sgs = CONF.neutron_defaults.pod_security_groups
+ if not pod_sgs:
+ raise cfg.RequiredOptError(
+ 'pod_security_groups',
+ cfg.OptGroup('neutron_defaults'))
+ try:
+ self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
+ except os_exc.NotFoundException:
+ LOG.debug("Fail to update pod sgs."
+ " Retrying policy deletion.")
+ raise exceptions.ResourceNotReady(knp)
+
+ # ensure ports at the pool don't have the NP sg associated
+ try:
+ net_id = self._get_policy_net_id(knp)
+ self._drv_vif_pool.remove_sg_from_pools(crd_sg, net_id)
+ except exceptions.K8sResourceNotFound:
+ # Probably the network got removed already, we can ignore it.
+ pass
+
+ if (CONF.octavia_defaults.enforce_sg_rules and policy and
+ not self._is_egress_only_policy(policy)):
+ services = driver_utils.get_services(
+ knp['metadata']['namespace'])
+ for svc in services.get('items'):
+ if (not svc['spec'].get('selector') or not
+ self._is_service_affected(svc, pods_to_update)):
+ continue
+ sgs = self._drv_svc_sg.get_security_groups(svc, project_id)
+ self._drv_lbaas.update_lbaas_sg(svc, sgs)
+
+ self._drv_policy.delete_np_sg(crd_sg)
+
+ LOG.debug("Removing finalizers from KuryrNetworkPolicy and "
+ "NetworkPolicy.")
+ if policy:
+ self.k8s.remove_finalizer(policy,
+ constants.NETWORKPOLICY_FINALIZER)
+ self.k8s.remove_finalizer(knp, constants.NETWORKPOLICY_FINALIZER)
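
At its core, `on_present()` above is a set reconciliation: rules required by the spec are diffed against rules recorded in the status over a fixed tuple of properties, and only the difference is created or deleted. The essence as a standalone sketch (simplified from the handler's `_compare_sgs`/`_find_sgs` logic):

```python
PROPS = ('direction', 'ethertype', 'port_range_max',
         'port_range_min', 'protocol', 'remote_ip_prefix')

def same_rule(a, b):
    return all(a.get(k) == b.get(k) for k in PROPS)

def reconcile(required, current):
    to_add = [r for r in required
              if not any(same_rule(r, c) for c in current)]
    to_remove = [(i, c['id']) for i, c in enumerate(current)
                 if not any(same_rule(c, r) for r in required)]
    to_remove.reverse()  # delete from the end so indexes stay valid
    return to_add, to_remove

required = [{'direction': 'ingress', 'protocol': 'tcp'}]
current = [{'id': 'a', 'direction': 'egress', 'protocol': 'tcp'}]
assert reconcile(required, current) == (required, [(0, 'a')])
```
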
diff --git a/kuryr_kubernetes/controller/handlers/kuryrport.py b/kuryr_kubernetes/controller/handlers/kuryrport.py
new file mode 100644
index 000000000..4e7856fae
--- /dev/null
+++ b/kuryr_kubernetes/controller/handlers/kuryrport.py
@@ -0,0 +1,284 @@
+# Copyright 2020 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+
+from openstack import exceptions as os_exc
+from os_vif import objects
+from oslo_config import cfg as oslo_cfg
+from oslo_log import log as logging
+
+from kuryr_kubernetes import clients
+from kuryr_kubernetes import constants
+from kuryr_kubernetes.controller.drivers import base as drivers
+from kuryr_kubernetes.controller.drivers import utils as driver_utils
+from kuryr_kubernetes.controller.managers import prometheus_exporter as exp
+from kuryr_kubernetes import exceptions as k_exc
+from kuryr_kubernetes.handlers import k8s_base
+
+LOG = logging.getLogger(__name__)
+KURYRPORT_URI = constants.K8S_API_CRD_NAMESPACES + '/{ns}/kuryrports/{crd}'
+
+
+class KuryrPortHandler(k8s_base.ResourceEventHandler):
+ """Controller side of KuryrPort process for Kubernetes pods.
+
+ `KuryrPortHandler` runs on the Kuryr-Kubernetes controller and is
+ responsible for creating/removing the OpenStack resources associated
+ with newly created pods, namely ports, and for updating the KuryrPort
+ CRD data.
+ """
+ OBJECT_KIND = constants.K8S_OBJ_KURYRPORT
+ OBJECT_WATCH_PATH = constants.K8S_API_CRD_KURYRPORTS
+
+ def __init__(self):
+ super(KuryrPortHandler, self).__init__()
+ self._drv_project = drivers.PodProjectDriver.get_instance()
+ self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
+ self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
+ # REVISIT(ltomasbo): The VIF Handler should not be aware of the pool
+ # directly. Due to the lack of a mechanism to load and set the
+ # VIFHandler driver, for now it is aware of the pool driver, but this
+ # will be reverted as soon as a mechanism is in place.
+ self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
+ specific_driver='multi_pool')
+ self._drv_vif_pool.set_vif_driver()
+ self._drv_multi_vif = drivers.MultiVIFDriver.get_enabled_drivers()
+ if self._is_network_policy_enabled():
+ self._drv_lbaas = drivers.LBaaSDriver.get_instance()
+ self._drv_svc_sg = (drivers.ServiceSecurityGroupsDriver
+ .get_instance())
+ self.k8s = clients.get_kubernetes_client()
+
+ def on_present(self, kuryrport_crd):
+ if not kuryrport_crd['spec']['vifs']:
+ # Get vifs
+ if not self.get_vifs(kuryrport_crd):
+ # Ignore the event; get_vifs() has already logged the reason.
+ return
+
+ vifs = {ifname: {'default': data['default'],
+ 'vif': objects.base.VersionedObject
+ .obj_from_primitive(data['vif'])}
+ for ifname, data in kuryrport_crd['spec']['vifs'].items()}
+
+ if all([v['vif'].active for v in vifs.values()]):
+ return
+
+ changed = False
+
+ try:
+ for ifname, data in vifs.items():
+ if (data['vif'].plugin == constants.KURYR_VIF_TYPE_SRIOV and
+ oslo_cfg.CONF.sriov.enable_node_annotations):
+ pod_node = kuryrport_crd['spec']['podNodeName']
+ # TODO(gryf): This will probably need adapting, so that it
+ # adds the information to the CRD instead of the pod.
+ driver_utils.update_port_pci_info(pod_node, data['vif'])
+ if not data['vif'].active:
+ try:
+ self._drv_vif_pool.activate_vif(data['vif'])
+ changed = True
+ except os_exc.ResourceNotFound:
+ LOG.debug("Port not found, possibly already deleted. "
+ "No need to activate it")
+ finally:
+ if changed:
+ try:
+ name = kuryrport_crd['metadata']['name']
+ namespace = kuryrport_crd['metadata']['namespace']
+ pod = self.k8s.get(f"{constants.K8S_API_NAMESPACES}"
+ f"/{namespace}/pods/{name}")
+ except k_exc.K8sResourceNotFound as ex:
+ LOG.exception("Failed to get pod: %s", ex)
+ raise
+
+ project_id = self._drv_project.get_project(pod)
+
+ try:
+ self._update_kuryrport_crd(kuryrport_crd, vifs)
+ except k_exc.K8sResourceNotFound as ex:
+ LOG.exception("Failed to update KuryrPort CRD: %s", ex)
+ security_groups = self._drv_sg.get_security_groups(
+ pod, project_id)
+ for ifname, data in vifs.items():
+ self._drv_vif_pool.release_vif(pod, data['vif'],
+ project_id,
+ security_groups)
+ except k_exc.K8sClientException:
+ raise k_exc.ResourceNotReady(pod['metadata']['name'])
+ try:
+ self._record_pod_creation_metric(pod)
+ except Exception:
+ LOG.debug("Failed to record metric for pod %s", name)
+ if self._is_network_policy_enabled():
+ crd_pod_selectors = self._drv_sg.create_sg_rules(pod)
+ if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules:
+ services = driver_utils.get_services()
+ self._update_services(services, crd_pod_selectors,
+ project_id)
+
+ def on_finalize(self, kuryrport_crd):
+ name = kuryrport_crd['metadata']['name']
+ namespace = kuryrport_crd['metadata']['namespace']
+ try:
+ pod = self.k8s.get(f"{constants.K8S_API_NAMESPACES}"
+ f"/{namespace}/pods/{name}")
+ except k_exc.K8sResourceNotFound as ex:
+ LOG.exception("Failed to get pod: %s", ex)
+ # TODO(gryf): Free resources
+ self.k8s.remove_finalizer(kuryrport_crd, constants.POD_FINALIZER)
+ raise
+
+ if (driver_utils.is_host_network(pod) or
+ not pod['spec'].get('nodeName')):
+ return
+
+ project_id = self._drv_project.get_project(pod)
+ try:
+ crd_pod_selectors = self._drv_sg.delete_sg_rules(pod)
+ except k_exc.ResourceNotReady:
+ # NOTE(ltomasbo): If the pod is being deleted before
+ # kuryr-controller annotated any information about its
+ # associated port, there is no need to delete the SG rules
+ # associated with it, so this exception can be safely ignored
+ # by the current SG drivers. Only the NP driver associates
+ # rules with the pods' IPs, and that waits for annotations to
+ # start.
+ #
+ # NOTE(gryf): perhaps we don't need to handle this case, since
+ # during CRD creation everything, including the security group
+ # rules, would be created too.
+ LOG.debug("Skipping deletion of SG rules associated with pod "
+ "%s", pod)
+ crd_pod_selectors = []
+ try:
+ security_groups = self._drv_sg.get_security_groups(pod, project_id)
+ except k_exc.ResourceNotReady:
+ # NOTE(ltomasbo): If the namespace object gets deleted first, the
+ # namespace security group driver will raise a ResourceNotReady
+ # exception as it can no longer access the kuryrnetwork CRD
+ # annotated on the namespace object. In such a case we set the
+ # security groups to an empty list so that, if pools are enabled,
+ # they will be properly released.
+ security_groups = []
+
+ for data in kuryrport_crd['spec']['vifs'].values():
+ vif = objects.base.VersionedObject.obj_from_primitive(data['vif'])
+ self._drv_vif_pool.release_vif(pod, vif, project_id,
+ security_groups)
+ if (self._is_network_policy_enabled() and crd_pod_selectors and
+ oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
+ services = driver_utils.get_services()
+ self._update_services(services, crd_pod_selectors, project_id)
+
+ # Remove finalizer out of pod.
+ self.k8s.remove_finalizer(pod, constants.POD_FINALIZER)
+
+ # Finally, remove finalizer from KuryrPort CRD
+ self.k8s.remove_finalizer(kuryrport_crd, constants.KURYRPORT_FINALIZER)
+
+ def get_vifs(self, kuryrport_crd):
+ try:
+ pod = self.k8s.get(f"{constants.K8S_API_NAMESPACES}"
+ f"/{kuryrport_crd['metadata']['namespace']}"
+ f"/pods"
+ f"/{kuryrport_crd['metadata']['name']}")
+ except k_exc.K8sResourceNotFound as ex:
+ LOG.exception("Failed to get pod: %s", ex)
+ # TODO(gryf): Release resources
+ self.k8s.remove_finalizer(kuryrport_crd,
+ constants.KURYRPORT_FINALIZER)
+ raise
+
+ project_id = self._drv_project.get_project(pod)
+ security_groups = self._drv_sg.get_security_groups(pod, project_id)
+ try:
+ subnets = self._drv_subnets.get_subnets(pod, project_id)
+ except (os_exc.ResourceNotFound, k_exc.K8sResourceNotFound):
+ LOG.warning("Subnet does not exists. If namespace driver is "
+ "used, probably the namespace for the pod is "
+ "already deleted. So this pod does not need to "
+ "get a port as it will be deleted too. If the "
+ "default subnet driver is used, then you must "
+ "select an existing subnet to be used by Kuryr.")
+ return False
+
+ # Request the default interface of pod
+ main_vif = self._drv_vif_pool.request_vif(
+ pod, project_id, subnets, security_groups)
+
+ if not main_vif:
+ pod_name = pod['metadata']['name']
+ LOG.warning("Ignoring event due to pod %s not being "
+ "scheduled yet.", pod_name)
+ return False
+
+ vifs = {constants.DEFAULT_IFNAME: {'default': True, 'vif': main_vif}}
+
+ # Request the additional interfaces from multiple drivers
+ index = 0
+ for driver in self._drv_multi_vif:
+ additional_vifs = driver.request_additional_vifs(pod, project_id,
+ security_groups)
+ for index, vif in enumerate(additional_vifs, start=index+1):
+ ifname = (oslo_cfg.CONF.kubernetes.additional_ifname_prefix +
+ str(index))
+ vifs[ifname] = {'default': False, 'vif': vif}
+
+ try:
+ self._update_kuryrport_crd(kuryrport_crd, vifs)
+ except k_exc.K8sClientException as ex:
+ LOG.exception("Kubernetes Client Exception creating "
+ "KuryrPort CRD: %s", ex)
+ for ifname, data in vifs.items():
+ self._drv_vif_pool.release_vif(pod, data['vif'],
+ project_id,
+ security_groups)
+ return True
+
+ def _update_kuryrport_crd(self, kuryrport_crd, vifs):
+ LOG.debug('Updating CRD %s', kuryrport_crd["metadata"]["name"])
+ spec = {}
+ for ifname, data in vifs.items():
+ data['vif'].obj_reset_changes(recursive=True)
+ spec[ifname] = {'default': data['default'],
+ 'vif': data['vif'].obj_to_primitive()}
+
+ self.k8s.patch_crd('spec', kuryrport_crd['metadata']['selfLink'],
+ {'vifs': spec})
+
+ def _is_network_policy_enabled(self):
+ enabled_handlers = oslo_cfg.CONF.kubernetes.enabled_handlers
+ svc_sg_driver = oslo_cfg.CONF.kubernetes.service_security_groups_driver
+ return ('policy' in enabled_handlers and svc_sg_driver == 'policy')
+
+ def _update_services(self, services, crd_pod_selectors, project_id):
+ for service in services.get('items'):
+ if not driver_utils.service_matches_affected_pods(
+ service, crd_pod_selectors):
+ continue
+ sgs = self._drv_svc_sg.get_security_groups(service,
+ project_id)
+ self._drv_lbaas.update_lbaas_sg(service, sgs)
+
+ def _record_pod_creation_metric(self, pod):
+ exporter = exp.ControllerPrometheusExporter.get_instance()
+ for condition in pod['status'].get('conditions', []):
+ if (condition['type'] == 'PodScheduled' and
+ condition['status'] == 'True'):
+ f_str = "%Y-%m-%dT%H:%M:%SZ"
+ time_obj = datetime.datetime.strptime(
+ condition['lastTransitionTime'], f_str)
+ pod_creation_time = datetime.datetime.utcnow() - time_obj
+ pod_creation_sec = pod_creation_time.total_seconds()
+ exporter.record_pod_creation_metric(pod_creation_sec)
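
One subtlety in `get_vifs()` above is interface naming: `index` is reused across multi-VIF drivers through `enumerate(..., start=index + 1)`, so numbering continues rather than restarting per driver. In miniature, with made-up VIF values, a prefix standing in for `additional_ifname_prefix`, and 'eth0' assumed as the default interface name:

```python
prefix = 'eth'  # stand-in for kubernetes.additional_ifname_prefix
vifs = {'eth0': {'default': True, 'vif': 'main-vif'}}

index = 0
for additional_vifs in (['vif-a', 'vif-b'], ['vif-c']):  # two drivers
    for index, vif in enumerate(additional_vifs, start=index + 1):
        vifs[prefix + str(index)] = {'default': False, 'vif': vif}

assert sorted(vifs) == ['eth0', 'eth1', 'eth2', 'eth3']
```
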
diff --git a/kuryr_kubernetes/controller/handlers/lbaas.py b/kuryr_kubernetes/controller/handlers/lbaas.py
index c22c33531..5884a0ef6 100644
--- a/kuryr_kubernetes/controller/handlers/lbaas.py
+++ b/kuryr_kubernetes/controller/handlers/lbaas.py
@@ -13,18 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import eventlet
-import time
-
from kuryr.lib._i18n import _
-from openstack import exceptions as os_exc
from oslo_log import log as logging
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes.controller.drivers import base as drv_base
-from kuryr_kubernetes.controller.drivers import utils as driver_utils
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes.objects import lbaas as obj_lbaas
@@ -35,10 +30,10 @@
SUPPORTED_SERVICE_TYPES = ('ClusterIP', 'LoadBalancer')
-class LBaaSSpecHandler(k8s_base.ResourceEventHandler):
- """LBaaSSpecHandler handles K8s Service events.
+class ServiceHandler(k8s_base.ResourceEventHandler):
+ """ServiceHandler handles K8s Service events.
- LBaaSSpecHandler handles K8s Service events and updates related Endpoints
+ ServiceHandler handles K8s Service events and updates related Endpoints
with LBaaSServiceSpec when necessary.
"""
@@ -46,14 +41,12 @@ class LBaaSSpecHandler(k8s_base.ResourceEventHandler):
OBJECT_WATCH_PATH = "%s/%s" % (k_const.K8S_API_BASE, "services")
def __init__(self):
- super(LBaaSSpecHandler, self).__init__()
+ super(ServiceHandler, self).__init__()
self._drv_project = drv_base.ServiceProjectDriver.get_instance()
self._drv_subnets = drv_base.ServiceSubnetsDriver.get_instance()
self._drv_sg = drv_base.ServiceSecurityGroupsDriver.get_instance()
def on_present(self, service):
- lbaas_spec = utils.get_lbaas_spec(service)
-
if self._should_ignore(service):
LOG.debug("Skipping Kubernetes service %s of an unsupported kind "
"or without a selector as Kubernetes does not create "
@@ -61,9 +54,18 @@ def on_present(self, service):
service['metadata']['name'])
return
- if self._has_lbaas_spec_changes(service, lbaas_spec):
- lbaas_spec = self._generate_lbaas_spec(service)
- utils.set_lbaas_spec(service, lbaas_spec)
+ k8s = clients.get_kubernetes_client()
+ loadbalancer_crd = k8s.get_loadbalancer_crd(service)
+ try:
+ self._patch_service_finalizer(service)
+ except k_exc.K8sClientException as ex:
+ LOG.exception("Failed to set service finalizer: %s", ex)
+ raise
+
+ if loadbalancer_crd is None:
+ loadbalancer_crd = self.create_crd_spec(service)
+ elif self._has_lbaas_spec_changes(service, loadbalancer_crd):
+ loadbalancer_crd = self._update_crd_spec(loadbalancer_crd, service)
def _is_supported_type(self, service):
spec = service['spec']
@@ -75,12 +77,22 @@ def _get_service_ip(self, service):
return None
def _should_ignore(self, service):
- return (not(self._has_selector(service)) or
- not(self._has_clusterip(service)) or
+ return (not(self._has_clusterip(service)) or
not(self._is_supported_type(service)))
- def _has_selector(self, service):
- return service['spec'].get('selector')
+ def _patch_service_finalizer(self, service):
+ k8s = clients.get_kubernetes_client()
+ k8s.add_finalizer(service, k_const.SERVICE_FINALIZER)
+
+ def on_finalize(self, service):
+ k8s = clients.get_kubernetes_client()
+
+ svc_name = service['metadata']['name']
+ svc_namespace = service['metadata']['namespace']
+
+ klb_crd_path = (f"{k_const.K8S_API_CRD_NAMESPACES}/"
+ f"{svc_namespace}/kuryrloadbalancers/{svc_name}")
+ k8s.delete(klb_crd_path)
def _has_clusterip(self, service):
# ignore headless service, clusterIP is None
@@ -97,45 +109,113 @@ def _get_subnet_id(self, service, project_id, ip):
if len(subnet_ids) != 1:
raise k_exc.IntegrityError(_(
"Found %(num)s subnets for service %(link)s IP %(ip)s") % {
- 'link': service['metadata']['selfLink'],
- 'ip': ip,
- 'num': len(subnet_ids)})
+ 'link': service['metadata']['selfLink'],
+ 'ip': ip,
+ 'num': len(subnet_ids)})
return subnet_ids.pop()
- def _generate_lbaas_spec(self, service):
+ def create_crd_spec(self, service):
+ svc_name = service['metadata']['name']
+ svc_namespace = service['metadata']['namespace']
+ kubernetes = clients.get_kubernetes_client()
+ svc_ip = self._get_service_ip(service)
+ spec_lb_ip = service['spec'].get('loadBalancerIP')
+ ports = service['spec'].get('ports')
+ for port in ports:
+ if type(port['targetPort']) == int:
+ port['targetPort'] = str(port['targetPort'])
project_id = self._drv_project.get_project(service)
- ip = self._get_service_ip(service)
- subnet_id = self._get_subnet_id(service, project_id, ip)
- ports = self._generate_lbaas_port_specs(service)
sg_ids = self._drv_sg.get_security_groups(service, project_id)
+ subnet_id = self._get_subnet_id(service, project_id, svc_ip)
spec_type = service['spec'].get('type')
- spec_lb_ip = service['spec'].get('loadBalancerIP')
+ loadbalancer_crd = {
+ 'apiVersion': 'openstack.org/v1',
+ 'kind': 'KuryrLoadBalancer',
+ 'metadata': {
+ 'name': svc_name,
+ 'finalizers': [k_const.KURYRLB_FINALIZER],
+ },
+ 'spec': {
+ 'ip': svc_ip,
+ 'ports': ports,
+ 'project_id': project_id,
+ 'security_groups_ids': sg_ids,
+ 'subnet_id': subnet_id,
+ 'type': spec_type
+ },
+ 'status': {
+ }
+ }
+
+ if spec_lb_ip is not None:
+ loadbalancer_crd['spec']['lb_ip'] = spec_lb_ip
+
+ try:
+ kubernetes.post('{}/{}/kuryrloadbalancers'.format(
+ k_const.K8S_API_CRD_NAMESPACES, svc_namespace),
+ loadbalancer_crd)
+ except k_exc.K8sConflict:
+ raise k_exc.ResourceNotReady(svc_name)
+ except k_exc.K8sClientException:
+ LOG.exception("Kubernetes Client Exception creating "
+ "kuryrloadbalancer CRD. %s"
+ % k_exc.K8sClientException)
+ raise
+ return loadbalancer_crd
- return obj_lbaas.LBaaSServiceSpec(ip=ip,
- project_id=project_id,
- subnet_id=subnet_id,
- ports=ports,
- security_groups_ids=sg_ids,
- type=spec_type,
- lb_ip=spec_lb_ip)
+ def _update_crd_spec(self, loadbalancer_crd, service):
+ svc_ip = self._get_service_ip(service)
+ ports = service['spec'].get('ports')
+ for port in ports:
+ if type(port['targetPort']) == int:
+ port['targetPort'] = str(port['targetPort'])
+ project_id = self._drv_project.get_project(service)
+ sg_ids = self._drv_sg.get_security_groups(service, project_id)
+ subnet_id = self._get_subnet_id(service, project_id, svc_ip)
+ spec_type = service['spec'].get('type')
+ kubernetes = clients.get_kubernetes_client()
+
+ patch = {
+ 'spec': {
+ 'ip': svc_ip,
+ 'ports': ports,
+ 'project_id': project_id,
+ 'security_groups_ids': sg_ids,
+ 'subnet_id': subnet_id,
+ 'type': spec_type
+ }
+ }
+
+ LOG.debug('Patching KuryrLoadBalancer CRD %s', loadbalancer_crd)
+ try:
+ kubernetes.patch_crd('spec', loadbalancer_crd['metadata'][
+ 'selfLink'], patch['spec'])
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('KuryrLoadBalancer CRD not found %s', loadbalancer_crd)
+ except k_exc.K8sConflict:
+ raise k_exc.ResourceNotReady(loadbalancer_crd)
+ except k_exc.K8sClientException:
+ LOG.exception('Error updating KuryrLoadBalancer CRD %s',
+ loadbalancer_crd)
+ raise
+ return loadbalancer_crd
- def _has_lbaas_spec_changes(self, service, lbaas_spec):
- return (self._has_ip_changes(service, lbaas_spec) or
- utils.has_port_changes(service, lbaas_spec))
+ def _has_lbaas_spec_changes(self, service, loadbalancer_crd):
+ return (self._has_ip_changes(service, loadbalancer_crd) or
+ utils.has_port_changes(service, loadbalancer_crd))
- def _has_ip_changes(self, service, lbaas_spec):
+ def _has_ip_changes(self, service, loadbalancer_crd):
link = service['metadata']['selfLink']
svc_ip = self._get_service_ip(service)
- if not lbaas_spec:
- if svc_ip:
- LOG.debug("LBaaS spec is missing for %(link)s"
- % {'link': link})
- return True
- elif str(lbaas_spec.ip) != svc_ip:
+ if loadbalancer_crd['spec'].get('ip') is None:
+ if svc_ip is None:
+ return False
+ return True
+
+ elif str(loadbalancer_crd['spec'].get('ip')) != svc_ip:
LOG.debug("LBaaS spec IP %(spec_ip)s != %(svc_ip)s for %(link)s"
- % {'spec_ip': lbaas_spec.ip,
+ % {'spec_ip': loadbalancer_crd['spec']['ip'],
'svc_ip': svc_ip,
'link': link})
return True
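
Putting `create_crd_spec()` together, a plain ClusterIP Service yields a KuryrLoadBalancer roughly like the dict below; the UUIDs are placeholders, the finalizer string stands in for `k_const.KURYRLB_FINALIZER`, and the integer targetPort 8080 has been stringified as the code above requires:

```python
example_klb = {
    'apiVersion': 'openstack.org/v1',
    'kind': 'KuryrLoadBalancer',
    'metadata': {
        'name': 'my-svc',
        'finalizers': ['<KURYRLB_FINALIZER>'],
    },
    'spec': {
        'ip': '10.0.0.10',
        'ports': [{'name': 'http', 'port': 80, 'protocol': 'TCP',
                   'targetPort': '8080'}],
        'project_id': '<project-uuid>',
        'security_groups_ids': ['<sg-uuid>'],
        'subnet_id': '<subnet-uuid>',
        'type': 'ClusterIP',
    },
    'status': {},
}
```
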
@@ -147,10 +227,10 @@ def _generate_lbaas_port_specs(self, service):
for port in utils.get_service_ports(service)]
-class LoadBalancerHandler(k8s_base.ResourceEventHandler):
- """LoadBalancerHandler handles K8s Endpoints events.
+class EndpointsHandler(k8s_base.ResourceEventHandler):
+ """EndpointsHandler handles K8s Endpoints events.
- LoadBalancerHandler handles K8s Endpoints events and tracks changes in
+ EndpointsHandler handles K8s Endpoints events and tracks changes in
LBaaSServiceSpec to update Neutron LBaaS accordingly and to reflect its
actual state in LBaaSState.
"""
@@ -159,13 +239,11 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
OBJECT_WATCH_PATH = "%s/%s" % (k_const.K8S_API_BASE, "endpoints")
def __init__(self):
- super(LoadBalancerHandler, self).__init__()
+ super(EndpointsHandler, self).__init__()
self._drv_lbaas = drv_base.LBaaSDriver.get_instance()
self._drv_pod_project = drv_base.PodProjectDriver.get_instance()
self._drv_pod_subnets = drv_base.PodSubnetsDriver.get_instance()
self._drv_service_pub_ip = drv_base.ServicePubIpDriver.get_instance()
- self._drv_project = drv_base.ServiceProjectDriver.get_instance()
- self._drv_sg = drv_base.ServiceSecurityGroupsDriver.get_instance()
# Note(yboaron) LBaaS driver supports 'provider' parameter in
# Load Balancer creation flow.
# We need to set the requested load balancer provider
@@ -175,104 +253,40 @@ def __init__(self):
!= 'default'):
self._lb_provider = (
config.CONF.kubernetes.endpoints_driver_octavia_provider)
- eventlet.spawn(self._cleanup_leftover_lbaas)
def on_present(self, endpoints):
- lbaas_spec = utils.get_lbaas_spec(endpoints)
- if self._should_ignore(endpoints, lbaas_spec):
+ k8s = clients.get_kubernetes_client()
+ loadbalancer_crd = k8s.get_loadbalancer_crd(endpoints)
+
+ if not self._has_pods(endpoints):
LOG.debug("Ignoring Kubernetes endpoints %s",
endpoints['metadata']['name'])
return
- lbaas_state = utils.get_lbaas_state(endpoints)
- if not lbaas_state:
- lbaas_state = obj_lbaas.LBaaSState()
- elif (lbaas_state.loadbalancer and self._lb_provider and
- self._lb_provider != lbaas_state.loadbalancer.provider):
- LOG.info("LoadBalancer associated to the service does not match "
- "the current provider: %s", lbaas_state.loadbalancer.id)
- lb_client = clients.get_loadbalancer_client()
- try:
- lb_client.get_load_balancer(lbaas_state.loadbalancer.id)
- except os_exc.NotFoundException:
- # NOTE(ltomasbo): If the loadbalancer is gone, remove the
- # annotations to ensure it is reprocessed
- lbaas_state.loadbalancer = None
- lbaas_state.pools = []
- lbaas_state.listeners = []
- lbaas_state.members = []
- utils.set_lbaas_state(endpoints, lbaas_state)
- return
+ if loadbalancer_crd is None:
+ loadbalancer_crd = self._create_crd_spec(endpoints)
+ else:
+ loadbalancer_crd = self._update_crd_spec(loadbalancer_crd,
+ endpoints)
+
+ def _has_lbaas_spec_changes(self, endpoints, loadbalancer_crd):
+ return (self._has_ip_changes(endpoints, loadbalancer_crd) or
+ utils.has_port_changes(endpoints, loadbalancer_crd))
+
+ def _has_ip_changes(self, endpoints, loadbalancer_crd):
+ link = endpoints['metadata']['selfLink']
+ endpoint_ip = next(
+ (address['ip'] for subset in endpoints.get('subsets', [])
+ for address in subset.get('addresses', [])
+ if address.get('ip')), None)
+ endpoint_crd_ip = loadbalancer_crd['spec'].get('ip')
+
+ if endpoint_crd_ip != endpoint_ip:
+ LOG.debug("LBaaS spec IP %(endpoint_crd_ip)s !="
+ " %(endpoint_ip)s for %(link)s"
+ % {'endpoint_crd_ip': endpoint_crd_ip,
+ 'endpoint_ip': endpoint_ip,
+ 'link': link})
+ return True
- if self._sync_lbaas_members(endpoints, lbaas_state, lbaas_spec):
- # Note(yboaron) For LoadBalancer services, we should allocate FIP,
- # associate it to LB VIP and update K8S service status
- if lbaas_state.service_pub_ip_info is None:
- service_pub_ip_info = (
- self._drv_service_pub_ip.acquire_service_pub_ip_info(
- lbaas_spec.type,
- lbaas_spec.lb_ip,
- lbaas_spec.project_id,
- lbaas_state.loadbalancer.port_id))
- if service_pub_ip_info:
- self._drv_service_pub_ip.associate_pub_ip(
- service_pub_ip_info, lbaas_state.loadbalancer.port_id)
- lbaas_state.service_pub_ip_info = service_pub_ip_info
- self._update_lb_status(
- endpoints,
- lbaas_state.service_pub_ip_info.ip_addr)
- # REVISIT(ivc): since _sync_lbaas_members is responsible for
- # creating all lbaas components (i.e. load balancer, listeners,
- # pools, members), it is currently possible for it to fail (due
- # to invalid Kuryr/K8s/Neutron configuration, e.g. Members' IPs
- # not belonging to configured Neutron subnet or Service IP being
- # in use by gateway or VMs) leaving some Neutron entities without
- # properly updating annotation. Some sort of failsafe mechanism is
- # required to deal with such situations (e.g. cleanup, or skip
- # failing items, or validate configuration) to prevent annotation
- # being out of sync with the actual Neutron state.
- try:
- utils.set_lbaas_state(endpoints, lbaas_state)
- except k_exc.K8sResourceNotFound:
- # Note(yboaron) It's impossible to store neutron resources
- # in K8S object since object was deleted. In that case
- # we should rollback all neutron resources.
- LOG.debug("LoadBalancerHandler failed to store Openstack "
- "resources in K8S object (not found)")
- self.on_deleted(endpoints, lbaas_state)
-
- def on_deleted(self, endpoints, lbaas_state=None):
- if lbaas_state is None:
- lbaas_state = utils.get_lbaas_state(endpoints)
- if not lbaas_state:
- return
- # NOTE(ivc): deleting pool deletes its members
- self._drv_lbaas.release_loadbalancer(
- loadbalancer=lbaas_state.loadbalancer)
- if lbaas_state.service_pub_ip_info:
- self._drv_service_pub_ip.release_pub_ip(
- lbaas_state.service_pub_ip_info)
-
- def _should_ignore(self, endpoints, lbaas_spec):
- # NOTE(ltomasbo): we must wait until service handler has annotated the
- # endpoints to process them. Thus, if annotations are not updated to
- # match the endpoints information, we should skip the event
- return not(lbaas_spec and
- self._has_pods(endpoints) and
- self._svc_handler_annotations_updated(endpoints,
- lbaas_spec))
-
- def _svc_handler_annotations_updated(self, endpoints, lbaas_spec):
- svc_link = self._get_service_link(endpoints)
- k8s = clients.get_kubernetes_client()
- service = k8s.get(svc_link)
- if utils.has_port_changes(service, lbaas_spec):
- # NOTE(ltomasbo): Ensuring lbaas_spec annotated on the endpoints
- # is in sync with the service status, i.e., upon a service
- # modification it will ensure endpoint modifications are not
- # handled until the service handler has performed its annotations
- return False
- return True
+ return False
def _has_pods(self, endpoints):
ep_subsets = endpoints.get('subsets', [])
@@ -283,327 +297,58 @@ def _has_pods(self, endpoints):
for address in subset.get('addresses', [])
if address.get('targetRef', {}).get('kind') == 'Pod')
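
`_has_pods()` is now the only gate for processing Endpoints: an object counts only if at least one subset address has a targetRef of kind Pod. As a standalone check:

```python
def has_pods(endpoints):
    return any(address.get('targetRef', {}).get('kind') == 'Pod'
               for subset in endpoints.get('subsets', [])
               for address in subset.get('addresses', []))

ep = {'subsets': [{'addresses': [
    {'ip': '10.0.0.5', 'targetRef': {'kind': 'Pod', 'name': 'demo'}}]}]}
assert has_pods(ep)
assert not has_pods({'subsets': []})
```
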
- def _sync_lbaas_members(self, endpoints, lbaas_state, lbaas_spec):
- changed = False
-
- if (self._has_pods(endpoints) and
- self._remove_unused_members(endpoints, lbaas_state,
- lbaas_spec)):
- changed = True
-
- if self._sync_lbaas_pools(endpoints, lbaas_state, lbaas_spec):
- changed = True
-
- if (self._has_pods(endpoints) and
- self._add_new_members(endpoints, lbaas_state, lbaas_spec)):
- changed = True
-
- return changed
-
- def _sync_lbaas_sgs(self, endpoints, lbaas_state):
- svc_link = self._get_service_link(endpoints)
- k8s = clients.get_kubernetes_client()
- service = k8s.get(svc_link)
-
- lb = lbaas_state.loadbalancer
- # NOTE(maysams) It's possible that while the service annotation
- # is added the backend pods on that service are not yet created
- # resulting in no security groups retrieved for the service.
- # Let's retrieve again to ensure is updated.
- project_id = self._drv_project.get_project(service)
- lb_sgs = self._drv_sg.get_security_groups(service, project_id)
- lb.security_groups = lb_sgs
-
- def _add_new_members(self, endpoints, lbaas_state, lbaas_spec):
- changed = False
-
- if config.CONF.octavia_defaults.enforce_sg_rules:
- try:
- self._sync_lbaas_sgs(endpoints, lbaas_state)
- except k_exc.K8sResourceNotFound:
- LOG.debug("The svc has been deleted while processing"
- " the endpoints update. No need to add new"
- " members.")
-
- lsnr_by_id = {listener.id: listener
- for listener in lbaas_state.listeners}
- pool_by_lsnr_port = {(lsnr_by_id[p.listener_id].protocol,
- lsnr_by_id[p.listener_id].port): p
- for p in lbaas_state.pools}
-
- # NOTE(yboaron): Since LBaaSv2 doesn't support UDP load balancing,
- # the LBaaS driver will return 'None' in case of UDP port
- # listener creation.
- # we should consider the case in which
- # 'pool_by_lsnr_port[p.protocol, p.port]' is missing
- pool_by_tgt_name = {}
- for p in lbaas_spec.ports:
- try:
- pool_by_tgt_name[p.name] = pool_by_lsnr_port[p.protocol,
- p.port]
- except KeyError:
- continue
- current_targets = {(str(m.ip), m.port, m.pool_id)
- for m in lbaas_state.members}
-
- for subset in endpoints.get('subsets', []):
- subset_ports = subset.get('ports', [])
- for subset_address in subset.get('addresses', []):
- try:
- target_ip = subset_address['ip']
- target_ref = subset_address['targetRef']
- if target_ref['kind'] != k_const.K8S_OBJ_POD:
- continue
- except KeyError:
- continue
- if not pool_by_tgt_name:
- continue
- for subset_port in subset_ports:
- target_port = subset_port['port']
- port_name = subset_port.get('name')
- try:
- pool = pool_by_tgt_name[port_name]
- except KeyError:
- LOG.debug("No pool found for port: %r", port_name)
- continue
-
- if (target_ip, target_port, pool.id) in current_targets:
- continue
- # TODO(apuimedo): Do not pass subnet_id at all when in
- # L3 mode once old neutron-lbaasv2 is not supported, as
- # octavia does not require it
- if (config.CONF.octavia_defaults.member_mode ==
- k_const.OCTAVIA_L2_MEMBER_MODE):
- try:
- member_subnet_id = self._get_pod_subnet(target_ref,
- target_ip)
- except k_exc.K8sResourceNotFound:
- LOG.debug("Member namespace has been deleted. No "
- "need to add the members as it is "
- "going to be deleted")
- continue
- else:
- # We use the service subnet id so that the connectivity
- # from VIP to pods happens in layer 3 mode, i.e.,
- # routed.
- member_subnet_id = lbaas_state.loadbalancer.subnet_id
- first_member_of_the_pool = True
- for member in lbaas_state.members:
- if pool.id == member.pool_id:
- first_member_of_the_pool = False
- break
- if first_member_of_the_pool:
- listener_port = lsnr_by_id[pool.listener_id].port
- else:
- listener_port = None
-
- member = self._drv_lbaas.ensure_member(
- loadbalancer=lbaas_state.loadbalancer,
- pool=pool,
- subnet_id=member_subnet_id,
- ip=target_ip,
- port=target_port,
- target_ref_namespace=target_ref['namespace'],
- target_ref_name=target_ref['name'],
- listener_port=listener_port)
- lbaas_state.members.append(member)
- changed = True
-
- return changed
-
- def _get_pod_subnet(self, target_ref, ip):
- # REVISIT(ivc): consider using true pod object instead
- pod = {'kind': target_ref['kind'],
- 'metadata': {'name': target_ref['name'],
- 'namespace': target_ref['namespace']}}
- project_id = self._drv_pod_project.get_project(pod)
- subnets_map = self._drv_pod_subnets.get_subnets(pod, project_id)
- subnet_ids = [subnet_id for subnet_id, network in subnets_map.items()
- for subnet in network.subnets.objects
- if ip in subnet.cidr]
- if subnet_ids:
- return subnet_ids[0]
- else:
- # NOTE(ltomasbo): We are assuming that if ip is not on the
- # pod subnet is because the member is using hostnetworking. In
- # this worker_nodes_subnet will be used
- return config.CONF.pod_vif_nested.worker_nodes_subnet
-
- def _get_port_in_pool(self, pool, lbaas_state, lbaas_spec):
- for listener in lbaas_state.listeners:
- if listener.id != pool.listener_id:
- continue
- for port in lbaas_spec.ports:
- if (listener.port == port.port and
- listener.protocol == port.protocol):
- return port
- return None
+ def _create_crd_spec(self, endpoints):
+ endpoints_name = endpoints['metadata']['name']
+ namespace = endpoints['metadata']['namespace']
+ kubernetes = clients.get_kubernetes_client()
+
+ subsets = endpoints.get('subsets', [])
+
+ loadbalancer_crd = {
+ 'apiVersion': 'openstack.org/v1',
+ 'kind': 'KuryrLoadBalancer',
+ 'metadata': {
+ 'name': endpoints_name,
+ 'finalizers': [k_const.KURYRLB_FINALIZER]
+ },
+ 'spec': {
+ 'subsets': subsets
+ },
+ 'status': {
+ }
+ }
- def _remove_unused_members(self, endpoints, lbaas_state, lbaas_spec):
- spec_ports = {}
- for pool in lbaas_state.pools:
- port = self._get_port_in_pool(pool, lbaas_state, lbaas_spec)
- if port:
- spec_ports[port.name] = pool.id
-
- current_targets = {(a['ip'], a.get('targetRef', {}).get('name', ''),
- p['port'], spec_ports.get(p.get('name')))
- for s in endpoints['subsets']
- for a in s['addresses']
- for p in s['ports']
- if p.get('name') in spec_ports}
-
- removed_ids = set()
- for member in lbaas_state.members:
- try:
- member_name = member.name
- # NOTE: The member name is compose of:
- # NAMESPACE_NAME/POD_NAME:PROTOCOL_PORT
- pod_name = member_name.split('/')[1].split(':')[0]
- except AttributeError:
- pod_name = ""
- if ((str(member.ip), pod_name, member.port, member.pool_id) in
- current_targets):
- continue
- self._drv_lbaas.release_member(lbaas_state.loadbalancer,
- member)
- removed_ids.add(member.id)
-
- if removed_ids:
- lbaas_state.members = [m for m in lbaas_state.members
- if m.id not in removed_ids]
- return bool(removed_ids)
-
- def _sync_lbaas_pools(self, endpoints, lbaas_state, lbaas_spec):
- changed = False
-
- if self._remove_unused_pools(lbaas_state, lbaas_spec):
- changed = True
-
- if self._sync_lbaas_listeners(endpoints, lbaas_state, lbaas_spec):
- changed = True
-
- if self._add_new_pools(lbaas_state, lbaas_spec):
- changed = True
-
- return changed
-
- def _add_new_pools(self, lbaas_state, lbaas_spec):
- changed = False
-
- current_listeners_ids = {pool.listener_id
- for pool in lbaas_state.pools}
- for listener in lbaas_state.listeners:
- if listener.id in current_listeners_ids:
- continue
- pool = self._drv_lbaas.ensure_pool(lbaas_state.loadbalancer,
- listener)
- lbaas_state.pools.append(pool)
- changed = True
-
- return changed
-
- def _is_pool_in_spec(self, pool, lbaas_state, lbaas_spec):
- # NOTE(yboaron): in order to check if a specific pool is in lbaas_spec
- # we should:
- # 1. get the listener that pool is attached to
- # 2. check if listener's attributes appear in lbaas_spec.
- for listener in lbaas_state.listeners:
- if listener.id != pool.listener_id:
- continue
- for port in lbaas_spec.ports:
- if (listener.port == port.port and
- listener.protocol == port.protocol):
- return True
- return False
-
- def _remove_unused_pools(self, lbaas_state, lbaas_spec):
- removed_ids = set()
- for pool in lbaas_state.pools:
- if self._is_pool_in_spec(pool, lbaas_state, lbaas_spec):
- continue
- self._drv_lbaas.release_pool(lbaas_state.loadbalancer,
- pool)
- removed_ids.add(pool.id)
- if removed_ids:
- lbaas_state.pools = [p for p in lbaas_state.pools
- if p.id not in removed_ids]
- lbaas_state.members = [m for m in lbaas_state.members
- if m.pool_id not in removed_ids]
- return bool(removed_ids)
-
- def _sync_lbaas_listeners(self, endpoints, lbaas_state, lbaas_spec):
- changed = False
-
- if self._remove_unused_listeners(endpoints, lbaas_state, lbaas_spec):
- changed = True
-
- if self._sync_lbaas_loadbalancer(endpoints, lbaas_state, lbaas_spec):
- changed = True
-
- if self._add_new_listeners(endpoints, lbaas_spec, lbaas_state):
- changed = True
-
- return changed
-
- def _add_new_listeners(self, endpoints, lbaas_spec, lbaas_state):
- changed = False
- lbaas_spec_ports = sorted(lbaas_spec.ports, key=lambda x: x.protocol)
- for port_spec in lbaas_spec_ports:
- protocol = port_spec.protocol
- port = port_spec.port
- name = "%s:%s" % (lbaas_state.loadbalancer.name, protocol)
- listener = [listener for listener in lbaas_state.listeners
- if listener.port == port and
- listener.protocol == protocol]
- if listener:
- continue
- # FIXME (maysams): Due to a bug in Octavia, which does
- # not allows listeners with same port but different
- # protocols to co-exist, we need to skip the creation of
- # listeners that have the same port as an existing one.
- listener = [listener for listener in lbaas_state.listeners if
- listener.port == port]
- if listener and not self._drv_lbaas.double_listeners_supported():
- LOG.warning("Skipping listener creation for %s as another one"
- " already exists with port %s", name, port)
- continue
- listener = self._drv_lbaas.ensure_listener(
- loadbalancer=lbaas_state.loadbalancer,
- protocol=protocol,
- port=port,
- service_type=lbaas_spec.type)
- if listener is not None:
- lbaas_state.listeners.append(listener)
- changed = True
- return changed
-
- def _remove_unused_listeners(self, endpoints, lbaas_state, lbaas_spec):
- current_listeners = {p.listener_id for p in lbaas_state.pools}
-
- removed_ids = set()
- for listener in lbaas_state.listeners:
- if listener.id in current_listeners:
- continue
- self._drv_lbaas.release_listener(lbaas_state.loadbalancer,
- listener)
- removed_ids.add(listener.id)
- if removed_ids:
- lbaas_state.listeners = [
- listener for listener in lbaas_state.listeners
- if listener.id not in removed_ids]
- return bool(removed_ids)
-
- def _update_lb_status(self, endpoints, lb_ip_address):
- status_data = {"loadBalancer": {
- "ingress": [{"ip": lb_ip_address.format()}]}}
- k8s = clients.get_kubernetes_client()
- svc_status_link = self._get_service_link(endpoints) + '/status'
try:
- k8s.patch("status", svc_status_link, status_data)
+ kubernetes.post('{}/{}/kuryrloadbalancers'.format(
+ k_const.K8S_API_CRD_NAMESPACES, namespace), loadbalancer_crd)
+ except k_exc.K8sConflict:
+ raise k_exc.ResourceNotReady(loadbalancer_crd)
except k_exc.K8sClientException:
- # REVISIT(ivc): only raise ResourceNotReady for NotFound
- raise k_exc.ResourceNotReady(svc_status_link)
+            LOG.exception("Kubernetes Client Exception creating "
+                          "KuryrLoadBalancer CRD %s", loadbalancer_crd)
+ raise
+ return loadbalancer_crd
+
+ def _update_crd_spec(self, loadbalancer_crd, endpoints):
+ kubernetes = clients.get_kubernetes_client()
+ subsets = endpoints.get('subsets')
+ lbaas_update_crd = {
+ 'subsets': subsets
+ }
+ try:
+ kubernetes.patch_crd('spec', loadbalancer_crd['metadata'][
+ 'selfLink'], lbaas_update_crd)
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('KuryrLoadbalancer CRD not found %s', loadbalancer_crd)
+ except k_exc.K8sConflict:
+ raise k_exc.ResourceNotReady(loadbalancer_crd)
+ except k_exc.K8sClientException:
+ LOG.exception('Error updating KuryrLoadbalancer CRD %s',
+ loadbalancer_crd)
+ raise
+
+ return loadbalancer_crd
def _get_service_link(self, endpoints):
ep_link = endpoints['metadata']['selfLink']
@@ -612,129 +357,6 @@ def _get_service_link(self, endpoints):
if link_parts[-2] != 'endpoints':
raise k_exc.IntegrityError(_(
"Unsupported endpoints link: %(link)s") % {
- 'link': ep_link})
+ 'link': ep_link})
link_parts[-2] = 'services'
return "/".join(link_parts)
-
- def _sync_lbaas_loadbalancer(self, endpoints, lbaas_state, lbaas_spec):
- changed = False
- lb = lbaas_state.loadbalancer
-
- if lb and lb.ip != lbaas_spec.ip:
- # if loadbalancerIP was associated to lbaas VIP, disassociate it.
- if lbaas_state.service_pub_ip_info:
- self._drv_service_pub_ip.disassociate_pub_ip(
- lbaas_state.service_pub_ip_info)
-
- self._drv_lbaas.release_loadbalancer(
- loadbalancer=lb)
- lb = None
- lbaas_state.pools = []
- lbaas_state.listeners = []
- lbaas_state.members = []
- changed = True
-
- if not lb:
- if lbaas_spec.ip:
- lb_name = self._drv_lbaas.get_service_loadbalancer_name(
- endpoints['metadata']['namespace'],
- endpoints['metadata']['name'])
- lb = self._drv_lbaas.ensure_loadbalancer(
- name=lb_name,
- project_id=lbaas_spec.project_id,
- subnet_id=lbaas_spec.subnet_id,
- ip=lbaas_spec.ip,
- security_groups_ids=lbaas_spec.security_groups_ids,
- service_type=lbaas_spec.type,
- provider=self._lb_provider)
- changed = True
- elif lbaas_state.service_pub_ip_info:
- self._drv_service_pub_ip.release_pub_ip(
- lbaas_state.service_pub_ip_info)
- lbaas_state.service_pub_ip_info = None
- changed = True
-
- lbaas_state.loadbalancer = lb
- return changed
-
- def _cleanup_leftover_lbaas(self):
- lbaas_client = clients.get_loadbalancer_client()
- services = []
- try:
- services = driver_utils.get_services().get('items')
- except k_exc.K8sClientException:
- LOG.debug("Skipping cleanup of leftover lbaas. "
- "Error retriving Kubernetes services")
- return
- services_cluster_ip = {service['spec']['clusterIP']: service
- for service in services
- if service['spec'].get('clusterIP')}
-
- services_without_selector = set(
- service['spec']['clusterIP'] for service in services
- if (service['spec'].get('clusterIP') and
- not service['spec'].get('selector')))
- lbaas_spec = {}
- self._drv_lbaas.add_tags('loadbalancer', lbaas_spec)
- loadbalancers = lbaas_client.load_balancers(**lbaas_spec)
- for loadbalancer in loadbalancers:
- if loadbalancer.vip_address not in services_cluster_ip.keys():
- lb_obj = obj_lbaas.LBaaSLoadBalancer(**loadbalancer)
- eventlet.spawn(self._ensure_release_lbaas, lb_obj)
- else:
- # check if the provider is the right one
- if (loadbalancer.vip_address not in services_without_selector
- and self._lb_provider
- and self._lb_provider != loadbalancer.provider):
- LOG.debug("Removing loadbalancer with old provider: %s",
- loadbalancer)
- lb_obj = obj_lbaas.LBaaSLoadBalancer(**loadbalancer)
- eventlet.spawn(
- self._ensure_release_lbaas,
- lb_obj,
- services_cluster_ip[loadbalancer.vip_address])
- # NOTE(ltomasbo): give some extra time in between lbs
- # recreation actions
- time.sleep(1)
-
- def _ensure_release_lbaas(self, lb_obj, svc=None):
- attempts = 0
- deadline = 0
- retry = True
- timeout = config.CONF.kubernetes.watch_retry_timeout
- while retry:
- try:
- if attempts == 1:
- deadline = time.time() + timeout
- if (attempts > 0 and
- utils.exponential_sleep(deadline, attempts) == 0):
- LOG.error("Failed releasing lbaas '%s': deadline exceeded",
- lb_obj.name)
- return
- self._drv_lbaas.release_loadbalancer(lb_obj)
- retry = False
- except k_exc.ResourceNotReady:
- LOG.debug("Attempt (%s) of loadbalancer release %s failed."
- " A retry will be triggered.", attempts,
- lb_obj.name)
- attempts += 1
- retry = True
- if svc:
- endpoints_link = utils.get_endpoints_link(svc)
- k8s = clients.get_kubernetes_client()
- try:
- endpoints = k8s.get(endpoints_link)
- except k_exc.K8sResourceNotFound:
- LOG.debug("Endpoint not Found.")
- return
-
- lbaas = utils.get_lbaas_state(endpoints)
- if lbaas:
- lbaas.loadbalancer = None
- lbaas.pools = []
- lbaas.listeners = []
- lbaas.members = []
- # NOTE(ltomasbo): give some extra time to ensure the Load
- # Balancer VIP is also released
- time.sleep(1)
- utils.set_lbaas_state(endpoints, lbaas)
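
For reference, a minimal sketch of the KuryrLoadBalancer body that
_create_crd_spec posts (field names are taken from the hunk above; the
name, namespace and subsets values are illustrative assumptions):

    # Hypothetical Endpoints input, trimmed to the fields that matter.
    endpoints = {'metadata': {'name': 'demo', 'namespace': 'default'},
                 'subsets': [...]}  # copied verbatim into the CRD spec

    loadbalancer_crd = {
        'apiVersion': 'openstack.org/v1',
        'kind': 'KuryrLoadBalancer',
        'metadata': {'name': 'demo',
                     'finalizers': [k_const.KURYRLB_FINALIZER]},
        'spec': {'subsets': endpoints.get('subsets', [])},
        'status': {},
    }
    # POSTed to {K8S_API_CRD_NAMESPACES}/default/kuryrloadbalancers; the
    # controller then fills 'status' in as Octavia resources are created.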
diff --git a/kuryr_kubernetes/controller/handlers/loadbalancer.py b/kuryr_kubernetes/controller/handlers/loadbalancer.py
new file mode 100644
index 000000000..7ec3769ae
--- /dev/null
+++ b/kuryr_kubernetes/controller/handlers/loadbalancer.py
@@ -0,0 +1,810 @@
+# Copyright (c) 2020 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import eventlet
+import time
+
+from oslo_log import log as logging
+
+from kuryr_kubernetes import clients
+from kuryr_kubernetes import config
+from kuryr_kubernetes import constants as k_const
+from kuryr_kubernetes.controller.drivers import base as drv_base
+from kuryr_kubernetes.controller.drivers import utils as driver_utils
+from kuryr_kubernetes import exceptions as k_exc
+from kuryr_kubernetes.handlers import k8s_base
+from kuryr_kubernetes.objects import lbaas as obj_lbaas
+from kuryr_kubernetes import utils
+
+LOG = logging.getLogger(__name__)
+
+SUPPORTED_SERVICE_TYPES = ('ClusterIP', 'LoadBalancer')
+
+
+class KuryrLoadBalancerHandler(k8s_base.ResourceEventHandler):
+    """KuryrLoadBalancerHandler handles KuryrLoadBalancer CRD events.
+
+    It watches KuryrLoadBalancer CRDs (whose spec mirrors the Service and
+    Endpoints data) and updates the Octavia load balancer resources
+    accordingly, reflecting their actual state in the CRD status.
+    """
+
+ OBJECT_KIND = k_const.K8S_OBJ_KURYRLOADBALANCER
+ OBJECT_WATCH_PATH = k_const.K8S_API_CRD_KURYRLOADBALANCERS
+
+ def __init__(self):
+ super(KuryrLoadBalancerHandler, self).__init__()
+ self._drv_lbaas = drv_base.LBaaSDriver.get_instance()
+ self._drv_pod_project = drv_base.PodProjectDriver.get_instance()
+ self._drv_pod_subnets = drv_base.PodSubnetsDriver.get_instance()
+ self._drv_service_pub_ip = drv_base.ServicePubIpDriver.get_instance()
+ # Note(yboaron) LBaaS driver supports 'provider' parameter in
+ # Load Balancer creation flow.
+ # We need to set the requested load balancer provider
+ # according to 'endpoints_driver_octavia_provider' configuration.
+ self._lb_provider = None
+ if (config.CONF.kubernetes.endpoints_driver_octavia_provider
+ != 'default'):
+ self._lb_provider = (
+ config.CONF.kubernetes.endpoints_driver_octavia_provider)
+ eventlet.spawn(self._cleanup_leftover_lbaas)
+
+ def on_present(self, loadbalancer_crd):
+ if self._should_ignore(loadbalancer_crd):
+            LOG.debug("Ignoring KuryrLoadBalancer %s: no pod members",
+                      loadbalancer_crd['metadata']['name'])
+ return
+
+ try:
+ name = loadbalancer_crd['metadata']['name']
+ namespace = loadbalancer_crd['metadata']['namespace']
+ self._get_loadbalancer_crd(name, namespace)
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('KuryrLoadbalancer CRD not found %s',
+ loadbalancer_crd)
+ except KeyError:
+ LOG.debug('KuryrLoadbalancer CRD not found')
+ raise k_exc.ResourceNotReady(loadbalancer_crd)
+
+ if self._sync_lbaas_members(loadbalancer_crd):
+ # Note(yboaron) For LoadBalancer services, we should allocate FIP,
+ # associate it to LB VIP and update K8S service status
+ lb_ip = loadbalancer_crd['spec'].get('lb_ip')
+ pub_info = loadbalancer_crd['status'].get(
+ 'service_pub_ip_info')
+ if pub_info is None:
+ service_pub_ip_info = (
+ self._drv_service_pub_ip.acquire_service_pub_ip_info(
+ loadbalancer_crd['spec']['type'],
+ lb_ip,
+ loadbalancer_crd['spec']['project_id'],
+ loadbalancer_crd['status']['loadbalancer'][
+ 'port_id']))
+ if service_pub_ip_info:
+ self._drv_service_pub_ip.associate_pub_ip(
+ service_pub_ip_info, loadbalancer_crd['status'][
+ 'loadbalancer']['port_id'])
+ loadbalancer_crd['status'][
+ 'service_pub_ip_info'] = service_pub_ip_info
+ self._update_lb_status(loadbalancer_crd)
+ kubernetes = clients.get_kubernetes_client()
+ try:
+ kubernetes.patch_crd('status', loadbalancer_crd[
+ 'metadata']['selfLink'], loadbalancer_crd[
+ 'status'])
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('KuryrLoadbalancer CRD not found %s',
+ loadbalancer_crd)
+ except k_exc.K8sClientException:
+                LOG.exception('Error updating KuryrLoadbalancer CRD %s',
+ loadbalancer_crd)
+ raise
+
+ def _should_ignore(self, loadbalancer_crd):
+        return not self._has_pods(loadbalancer_crd)
+
+ def _has_pods(self, loadbalancer_crd):
+ ep_subsets = loadbalancer_crd['spec'].get('subsets', [])
+ if not ep_subsets:
+ return False
+        return any(address.get('targetRef', {}).get('kind') == 'Pod'
+                   for subset in ep_subsets
+                   for address in subset.get('addresses', []))
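+
+    # For illustration (an assumed Endpoints-shaped input; all values are
+    # made up), a 'subsets' entry copied into the CRD spec looks roughly
+    # like:
+    #
+    #   [{'addresses': [{'ip': '10.0.0.5',
+    #                    'targetRef': {'kind': 'Pod',
+    #                                  'name': 'demo-1',
+    #                                  'namespace': 'default'}}],
+    #     'ports': [{'name': 'http', 'port': 8080, 'protocol': 'TCP'}]}]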
+
+ def on_finalize(self, loadbalancer_crd):
+ LOG.debug("Deleting the loadbalancer CRD")
+
+ if not loadbalancer_crd:
+ LOG.warning("Load Balancer CRD not present")
+ return
+
+ if loadbalancer_crd['status'] != {}:
+ # NOTE(ivc): deleting pool deletes its members
+ self._drv_lbaas.release_loadbalancer(
+ loadbalancer=loadbalancer_crd['status'].get('loadbalancer'))
+
+ try:
+ pub_info = loadbalancer_crd['status']['service_pub_ip_info']
+ except KeyError:
+ pub_info = None
+
+ if pub_info:
+ self._drv_service_pub_ip.release_pub_ip(
+ loadbalancer_crd['status']['service_pub_ip_info'])
+
+ kubernetes = clients.get_kubernetes_client()
+ LOG.debug('Removing finalizer from KuryrLoadBalancer CRD %s',
+ loadbalancer_crd)
+ try:
+ kubernetes.remove_finalizer(loadbalancer_crd,
+ k_const.KURYRLB_FINALIZER)
+ except k_exc.K8sClientException:
+            LOG.exception('Error removing kuryrloadbalancer CRD finalizer '
+                          'for %s', loadbalancer_crd)
+ raise
+
+ namespace = loadbalancer_crd['metadata']['namespace']
+ name = loadbalancer_crd['metadata']['name']
+ try:
+ service = kubernetes.get(f"{k_const.K8S_API_NAMESPACES}"
+ f"/{namespace}/services/{name}")
+ except k_exc.K8sResourceNotFound as ex:
+ LOG.exception("Failed to get service: %s", ex)
+ raise
+
+ LOG.debug('Removing finalizer from service %s',
+ service["metadata"]["name"])
+ try:
+ kubernetes.remove_finalizer(service, k_const.SERVICE_FINALIZER)
+ except k_exc.K8sClientException:
+            LOG.exception('Error removing service finalizer '
+                          'for %s', service["metadata"]["name"])
+ raise
+
+ def _get_loadbalancer_crd(self, loadbalancer_crd_name, namespace):
+ k8s = clients.get_kubernetes_client()
+ try:
+ loadbalancer_crd = k8s.get('{}/{}/kuryrloadbalancers/{}'.format(
+ k_const.K8S_API_CRD_NAMESPACES, namespace,
+ loadbalancer_crd_name))
+ except k_exc.K8sResourceNotFound:
+ return None
+ except k_exc.K8sClientException:
+ LOG.exception("Kubernetes Client Exception.")
+ raise
+ return loadbalancer_crd
+
+ def _sync_lbaas_members(self, loadbalancer_crd):
+ changed = False
+
+ if (self._has_pods(loadbalancer_crd) and
+ self._remove_unused_members(loadbalancer_crd)):
+ changed = True
+
+ if self._sync_lbaas_pools(loadbalancer_crd):
+ changed = True
+
+ if (self._has_pods(loadbalancer_crd) and
+ self._add_new_members(loadbalancer_crd)):
+ changed = True
+
+ return changed
+
+ def _sync_lbaas_sgs(self, loadbalancer_crd):
+ # NOTE (maysams) Need to retrieve the LBaaS Spec again due to
+ # the possibility of it being updated after the LBaaS creation
+ # process has started.
+ lbaas_spec = loadbalancer_crd.get('spec')
+
+ lb = loadbalancer_crd['status'].get('loadbalancer')
+ if not lb:
+ return
+
+ default_sgs = config.CONF.neutron_defaults.pod_security_groups
+        # NOTE(maysams) As the Service and Endpoints data are copied into
+        # the CRD spec in separate k8s calls, the spec may still be
+        # incomplete after a controller restart. For this case, a
+        # ResourceNotReady exception is raised till the spec is complete.
+ if lbaas_spec:
+ lbaas_spec_sgs = loadbalancer_crd['spec'].get(
+ 'security_groups_ids', [])
+ else:
+            raise k_exc.ResourceNotReady(loadbalancer_crd)
+ if (lb.get('security_groups') and
+ lb.get('security_groups') != lbaas_spec_sgs):
+ sgs = [lb_sg for lb_sg in lb['security_groups']
+ if lb_sg not in default_sgs]
+ if lbaas_spec_sgs != default_sgs:
+ sgs.extend(lbaas_spec_sgs)
+
+ # Check if this should update the CRD
+ lb['security_groups'] = sgs
+
+ def _add_new_members(self, loadbalancer_crd):
+ changed = False
+ try:
+ self._sync_lbaas_sgs(loadbalancer_crd)
+ except k_exc.K8sResourceNotFound:
+ LOG.debug("The svc has been deleted while processing the endpoints"
+ " update. No need to add new members.")
+
+ lsnr_by_id = {l['id']: l for l in loadbalancer_crd['status'].get(
+ 'listeners', [])}
+ pool_by_lsnr_port = {(lsnr_by_id[p['listener_id']]['protocol'],
+ lsnr_by_id[p['listener_id']]['port']): p
+ for p in loadbalancer_crd['status'].get(
+ 'pools', [])}
+
+ # NOTE(yboaron): Since LBaaSv2 doesn't support UDP load balancing,
+ # the LBaaS driver will return 'None' in case of UDP port
+ # listener creation.
+ # we should consider the case in which
+ # 'pool_by_lsnr_port[p.protocol, p.port]' is missing
+ pool_by_tgt_name = {}
+ for p in loadbalancer_crd['spec'].get('ports', []):
+ try:
+ pool_by_tgt_name[p['name']] = pool_by_lsnr_port[p['protocol'],
+ p['port']]
+ except KeyError:
+ continue
+
+ current_targets = {(str(m['ip']), m['port'], m['pool_id'])
+ for m in loadbalancer_crd['status'].get(
+ 'members', [])}
+
+ for subset in loadbalancer_crd['spec']['subsets']:
+ subset_ports = subset.get('ports', [])
+ for subset_address in subset.get('addresses', []):
+ try:
+ target_ip = subset_address['ip']
+ target_ref = subset_address['targetRef']
+ if target_ref['kind'] != k_const.K8S_OBJ_POD:
+ continue
+ except KeyError:
+ continue
+ if not pool_by_tgt_name:
+ continue
+ for subset_port in subset_ports:
+ target_port = subset_port['port']
+ port_name = subset_port.get('name')
+ try:
+ pool = pool_by_tgt_name[port_name]
+ except KeyError:
+ LOG.debug("No pool found for port: %r", port_name)
+ continue
+
+ if (target_ip, target_port, pool['id']) in current_targets:
+ continue
+ # TODO(apuimedo): Do not pass subnet_id at all when in
+ # L3 mode once old neutron-lbaasv2 is not supported, as
+ # octavia does not require it
+ if (config.CONF.octavia_defaults.member_mode ==
+ k_const.OCTAVIA_L2_MEMBER_MODE):
+ try:
+ member_subnet_id = self._get_pod_subnet(target_ref,
+ target_ip)
+ except k_exc.K8sResourceNotFound:
+ LOG.debug("Member namespace has been deleted. No "
+ "need to add the members as it is "
+ "going to be deleted")
+ continue
+ else:
+ # We use the service subnet id so that the connectivity
+ # from VIP to pods happens in layer 3 mode, i.e.,
+ # routed.
+ member_subnet_id = loadbalancer_crd['status'][
+ 'loadbalancer']['subnet_id']
+ first_member_of_the_pool = True
+ for member in loadbalancer_crd['status'].get(
+ 'members', []):
+ if pool['id'] == member['pool_id']:
+ first_member_of_the_pool = False
+ break
+ if first_member_of_the_pool:
+ listener_port = lsnr_by_id[pool['listener_id']][
+ 'port']
+ else:
+ listener_port = None
+ loadbalancer = loadbalancer_crd['status']['loadbalancer']
+ member = self._drv_lbaas.ensure_member(
+ loadbalancer=loadbalancer,
+ pool=pool,
+ subnet_id=member_subnet_id,
+ ip=target_ip,
+ port=target_port,
+ target_ref_namespace=target_ref['namespace'],
+ target_ref_name=target_ref['name'],
+ listener_port=listener_port)
+                loadbalancer_crd['status'].setdefault(
+                    'members', []).append(member)
+ kubernetes = clients.get_kubernetes_client()
+ try:
+ kubernetes.patch_crd('status', loadbalancer_crd[
+ 'metadata']['selfLink'], loadbalancer_crd[
+ 'status'])
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('KuryrLoadbalancer CRD not found %s',
+ loadbalancer_crd)
+ except k_exc.K8sClientException:
+                LOG.exception('Error updating KuryrLoadbalancer CRD %s',
+ loadbalancer_crd)
+ raise
+ changed = True
+ return changed
+
+ def _get_pod_subnet(self, target_ref, ip):
+ # REVISIT(ivc): consider using true pod object instead
+ pod = {'kind': target_ref['kind'],
+ 'metadata': {'name': target_ref['name'],
+ 'namespace': target_ref['namespace']}}
+ project_id = self._drv_pod_project.get_project(pod)
+ subnets_map = self._drv_pod_subnets.get_subnets(pod, project_id)
+ subnet_ids = [subnet_id for subnet_id, network in subnets_map.items()
+ for subnet in network.subnets.objects
+ if ip in subnet.cidr]
+ if subnet_ids:
+ return subnet_ids[0]
+ else:
+            # NOTE(ltomasbo): We are assuming that if the IP is not on the
+            # pod subnet it is because the member is using host networking.
+            # In this case worker_nodes_subnet will be used.
+ return config.CONF.pod_vif_nested.worker_nodes_subnet
+
+ def _get_port_in_pool(self, pool, loadbalancer_crd):
+ for l in loadbalancer_crd['status']['listeners']:
+ if l['id'] != pool['listener_id']:
+ continue
+ for port in loadbalancer_crd['spec'].get('ports', []):
+ if l.get('port') == port.get(
+ 'port') and l.get('protocol') == port.get('protocol'):
+ return port
+ return None
+
+ def _remove_unused_members(self, loadbalancer_crd):
+ spec_ports = {}
+ pools = loadbalancer_crd['status'].get('pools', [])
+ for pool in pools:
+ port = self._get_port_in_pool(pool, loadbalancer_crd)
+ if port:
+                spec_ports[port.get('name')] = pool['id']
+
+        subsets = loadbalancer_crd['spec'].get('subsets', [])
+ current_targets = {(a['ip'], a.get('targetRef', {}).get('name', ''),
+ p['port'], spec_ports.get(p.get('name')))
+ for s in subsets
+ for a in s['addresses']
+ for p in s['ports']
+ if p.get('name') in spec_ports}
+
+ removed_ids = set()
+
+ for member in loadbalancer_crd['status'].get('members', []):
+ try:
+ member_name = member['name']
+                # NOTE: The member name is composed of:
+                # NAMESPACE_NAME/POD_NAME:PROTOCOL_PORT
+                pod_name = member_name.split('/')[1].split(':')[0]
+            except (KeyError, AttributeError):
+ pod_name = ""
+ if ((str(member['ip']), pod_name, member['port'], member[
+ 'pool_id']) in current_targets):
+ continue
+
+ self._drv_lbaas.release_member(loadbalancer_crd['status'][
+ 'loadbalancer'], member)
+ removed_ids.add(member['id'])
+
+ if removed_ids:
+            loadbalancer_crd['status']['members'] = [
+                m for m in loadbalancer_crd['status']['members']
+                if m['id'] not in removed_ids]
+
+ kubernetes = clients.get_kubernetes_client()
+ try:
+ kubernetes.patch_crd('status', loadbalancer_crd[
+ 'metadata']['selfLink'], loadbalancer_crd[
+ 'status'])
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('KuryrLoadbalancer CRD not found %s',
+ loadbalancer_crd)
+ except k_exc.K8sClientException:
+                LOG.exception('Error updating KuryrLoadbalancer CRD %s',
+ loadbalancer_crd)
+ raise
+ return bool(removed_ids)
+
+ def _sync_lbaas_pools(self, loadbalancer_crd):
+ changed = False
+
+ if self._remove_unused_pools(loadbalancer_crd):
+ changed = True
+
+ if self._sync_lbaas_listeners(loadbalancer_crd):
+ changed = True
+
+ if self._add_new_pools(loadbalancer_crd):
+ changed = True
+
+ return changed
+
+ def _add_new_pools(self, loadbalancer_crd):
+ changed = False
+
+ current_listeners_ids = {pool['listener_id']
+ for pool in loadbalancer_crd['status'].get(
+ 'pools', [])}
+ for listener in loadbalancer_crd['status'].get('listeners', []):
+ if listener['id'] in current_listeners_ids:
+ continue
+ pool = self._drv_lbaas.ensure_pool(loadbalancer_crd['status'][
+ 'loadbalancer'], listener)
+            loadbalancer_crd['status'].setdefault('pools', []).append(pool)
+ kubernetes = clients.get_kubernetes_client()
+ try:
+ kubernetes.patch_crd('status', loadbalancer_crd['metadata'][
+ 'selfLink'], loadbalancer_crd['status'])
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('KuryrLoadbalancer CRD not found %s',
+ loadbalancer_crd)
+ except k_exc.K8sClientException:
+ LOG.exception('Error updating KuryrLoadbalancer CRD %s',
+ loadbalancer_crd)
+ raise
+ changed = True
+ return changed
+
+ def _is_pool_in_spec(self, pool, loadbalancer_crd):
+ # NOTE(yboaron): in order to check if a specific pool is in lbaas_spec
+ # we should:
+ # 1. get the listener that pool is attached to
+ # 2. check if listener's attributes appear in lbaas_spec.
+ for l in loadbalancer_crd['status']['listeners']:
+ if l['id'] != pool['listener_id']:
+ continue
+            for port in loadbalancer_crd['spec'].get('ports', []):
+ if l['port'] == port['port'] and l['protocol'] == port[
+ 'protocol']:
+ return True
+ return False
+
+ def _remove_unused_pools(self, loadbalancer_crd):
+ removed_ids = set()
+
+ for pool in loadbalancer_crd['status'].get('pools', []):
+ if self._is_pool_in_spec(pool, loadbalancer_crd):
+ continue
+ self._drv_lbaas.release_pool(loadbalancer_crd['status'][
+ 'loadbalancer'], pool)
+ removed_ids.add(pool['id'])
+ if removed_ids:
+            loadbalancer_crd['status']['pools'] = [
+                p for p in loadbalancer_crd['status']['pools']
+                if p['id'] not in removed_ids]
+            loadbalancer_crd['status']['members'] = [
+                m for m in loadbalancer_crd['status'].get('members', [])
+                if m['pool_id'] not in removed_ids]
+
+ kubernetes = clients.get_kubernetes_client()
+ try:
+ kubernetes.patch_crd('status', loadbalancer_crd[
+ 'metadata']['selfLink'], loadbalancer_crd[
+ 'status'])
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('KuryrLoadbalancer CRD not found %s',
+ loadbalancer_crd)
+ except k_exc.K8sClientException:
+                LOG.exception('Error updating KuryrLoadbalancer CRD %s',
+ loadbalancer_crd)
+ raise
+ return bool(removed_ids)
+
+ def _sync_lbaas_listeners(self, loadbalancer_crd):
+ changed = False
+
+ if self._remove_unused_listeners(loadbalancer_crd):
+ changed = True
+
+ if self._sync_lbaas_loadbalancer(loadbalancer_crd):
+ changed = True
+
+ if self._add_new_listeners(loadbalancer_crd):
+ changed = True
+
+ return changed
+
+ def _add_new_listeners(self, loadbalancer_crd):
+ changed = False
+ lb_crd_spec_ports = loadbalancer_crd['spec'].get('ports')
+ if not lb_crd_spec_ports:
+ return changed
+ lbaas_spec_ports = sorted(lb_crd_spec_ports,
+ key=lambda x: x['protocol'])
+ for port_spec in lbaas_spec_ports:
+ protocol = port_spec['protocol']
+ port = port_spec['port']
+ name = "%s:%s" % (loadbalancer_crd['status']['loadbalancer'][
+ 'name'], protocol)
+
+ listener = [l for l in loadbalancer_crd['status'].get(
+ 'listeners', []) if l['port'] == port and l[
+ 'protocol'] == protocol]
+
+ if listener:
+ continue
+ # FIXME (maysams): Due to a bug in Octavia, which does
+ # not allows listeners with same port but different
+ # protocols to co-exist, we need to skip the creation of
+ # listeners that have the same port as an existing one.
+ listener = [l for l in loadbalancer_crd['status'].get(
+ 'listeners', []) if l['port'] == port]
+
+ if listener and not self._drv_lbaas.double_listeners_supported():
+ LOG.warning("Skipping listener creation for %s as another one"
+ " already exists with port %s", name, port)
+ continue
+ listener = self._drv_lbaas.ensure_listener(
+ loadbalancer=loadbalancer_crd['status'].get('loadbalancer'),
+ protocol=protocol,
+ port=port,
+ service_type=loadbalancer_crd['spec'].get('type'))
+ if listener is not None:
+                loadbalancer_crd['status'].setdefault(
+                    'listeners', []).append(listener)
+
+ kubernetes = clients.get_kubernetes_client()
+ try:
+ kubernetes.patch_crd('status', loadbalancer_crd[
+ 'metadata']['selfLink'], loadbalancer_crd['status'])
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('KuryrLoadbalancer CRD not found %s',
+ loadbalancer_crd)
+ except k_exc.K8sClientException:
+ LOG.exception('Error updating KuryrLoadbalancer CRD %s',
+ loadbalancer_crd)
+ raise
+ changed = True
+ return changed
+
+ def _remove_unused_listeners(self, loadbalancer_crd):
+ current_listeners = {p['listener_id'] for p in loadbalancer_crd[
+ 'status'].get('pools', [])}
+ removed_ids = set()
+ for listener in loadbalancer_crd['status'].get('listeners', []):
+ if listener['id'] in current_listeners:
+ continue
+ self._drv_lbaas.release_listener(loadbalancer_crd['status'][
+ 'loadbalancer'], listener)
+ removed_ids.add(listener['id'])
+ if removed_ids:
+            loadbalancer_crd['status']['listeners'] = [
+                l for l in loadbalancer_crd['status'].get('listeners', [])
+                if l['id'] not in removed_ids]
+
+ kubernetes = clients.get_kubernetes_client()
+ try:
+ kubernetes.patch_crd('status', loadbalancer_crd[
+ 'metadata']['selfLink'], loadbalancer_crd[
+ 'status'])
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('KuryrLoadbalancer CRD not found %s',
+ loadbalancer_crd)
+ except k_exc.K8sClientException:
+                LOG.exception('Error updating KuryrLoadbalancer CRD %s',
+ loadbalancer_crd)
+ raise
+ return bool(removed_ids)
+
+ def _update_lb_status(self, lb_crd):
+ lb_crd_status = lb_crd['status']
+ lb_ip_address = lb_crd_status['service_pub_ip_info']['ip_addr']
+ name = lb_crd['metadata']['name']
+ ns = lb_crd['metadata']['namespace']
+ status_data = {"loadBalancer": {
+ "ingress": [{"ip": lb_ip_address.format()}]}}
+ k8s = clients.get_kubernetes_client()
+ try:
+ k8s.patch("status", f"{k_const.K8S_API_NAMESPACES}"
+ f"/{ns}/services/{name}/status",
+ status_data)
+ except k_exc.K8sConflict:
+ raise k_exc.ResourceNotReady(name)
+ except k_exc.K8sClientException:
+            LOG.exception("Kubernetes Client Exception when updating "
+                          "the svc status %s", name)
+ raise
+
+ def _sync_lbaas_loadbalancer(self, loadbalancer_crd):
+ changed = False
+ lb = loadbalancer_crd['status'].get('loadbalancer')
+
+ if lb and lb['ip'] != loadbalancer_crd['spec'].get('ip'):
+ # if loadbalancerIP was associated to lbaas VIP, disassociate it.
+
+ try:
+ pub_info = loadbalancer_crd['status']['service_pub_ip_info']
+ except KeyError:
+ pub_info = None
+
+ if pub_info:
+ self._drv_service_pub_ip.disassociate_pub_ip(
+ loadbalancer_crd['status']['service_pub_ip_info'])
+
+ self._drv_lbaas.release_loadbalancer(
+ loadbalancer=lb)
+ lb = None
+ loadbalancer_crd['status']['pools'] = []
+ loadbalancer_crd['status']['listeners'] = []
+ loadbalancer_crd['status']['members'] = []
+
+ kubernetes = clients.get_kubernetes_client()
+ try:
+ kubernetes.patch_crd('status', loadbalancer_crd['metadata'][
+ 'selfLink'], loadbalancer_crd['status'])
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('KuryrLoadbalancer CRD not found %s',
+ loadbalancer_crd)
+ except k_exc.K8sClientException:
+ LOG.exception('Error updating KuryrLoadbalancer CRD %s',
+ loadbalancer_crd)
+ raise
+ changed = True
+
+ if not lb:
+ if loadbalancer_crd['spec'].get('ip'):
+ lb_name = self._drv_lbaas.get_service_loadbalancer_name(
+ loadbalancer_crd['metadata']['namespace'],
+ loadbalancer_crd['metadata']['name'])
+ lb = self._drv_lbaas.ensure_loadbalancer(
+ name=lb_name,
+ project_id=loadbalancer_crd['spec'].get('project_id'),
+ subnet_id=loadbalancer_crd['spec'].get('subnet_id'),
+ ip=loadbalancer_crd['spec'].get('ip'),
+ security_groups_ids=loadbalancer_crd['spec'].get(
+ 'security_groups_ids'),
+ service_type=loadbalancer_crd['spec'].get('type'),
+ provider=self._lb_provider)
+ loadbalancer_crd['status']['loadbalancer'] = lb
+ changed = True
+ elif loadbalancer_crd['status'].get('service_pub_ip_info'):
+ self._drv_service_pub_ip.release_pub_ip(
+ loadbalancer_crd['status']['service_pub_ip_info'])
+ loadbalancer_crd['status']['service_pub_ip_info'] = None
+ changed = True
+
+ kubernetes = clients.get_kubernetes_client()
+ try:
+ kubernetes.patch_crd('status', loadbalancer_crd['metadata'][
+ 'selfLink'], loadbalancer_crd['status'])
+ except k_exc.K8sResourceNotFound:
+ LOG.debug('KuryrLoadbalancer CRD not found %s',
+ loadbalancer_crd)
+ except k_exc.K8sClientException:
+ LOG.exception('Error updating KuryrLoadbalancer CRD %s',
+ loadbalancer_crd)
+ raise
+
+ return changed
+
+ def _cleanup_leftover_lbaas(self):
+ lbaas_client = clients.get_loadbalancer_client()
+ services = []
+ try:
+ services = driver_utils.get_services().get('items')
+ except k_exc.K8sClientException:
+            LOG.debug("Skipping cleanup of leftover lbaas. "
+                      "Error retrieving Kubernetes services")
+ return
+ services_cluster_ip = {service['spec']['clusterIP']: service
+ for service in services
+ if service['spec'].get('clusterIP')}
+
+ services_without_selector = set(
+ service['spec']['clusterIP'] for service in services
+ if (service['spec'].get('clusterIP') and
+ not service['spec'].get('selector')))
+ lbaas_spec = {}
+ self._drv_lbaas.add_tags('loadbalancer', lbaas_spec)
+ loadbalancers = lbaas_client.load_balancers(**lbaas_spec)
+ for loadbalancer in loadbalancers:
+ if loadbalancer.vip_address not in services_cluster_ip.keys():
+ lb_obj = obj_lbaas.LBaaSLoadBalancer(**loadbalancer)
+ eventlet.spawn(self._ensure_release_lbaas, lb_obj)
+ else:
+ # check if the provider is the right one
+ if (loadbalancer.vip_address not in services_without_selector
+ and self._lb_provider
+ and self._lb_provider != loadbalancer.provider):
+ LOG.debug("Removing loadbalancer with old provider: %s",
+ loadbalancer)
+ lb_obj = obj_lbaas.LBaaSLoadBalancer(**loadbalancer)
+ eventlet.spawn(
+ self._ensure_release_lbaas,
+ lb_obj,
+ services_cluster_ip[loadbalancer.vip_address])
+ # NOTE(ltomasbo): give some extra time in between lbs
+ # recreation actions
+ time.sleep(1)
+
+ def _ensure_release_lbaas(self, lb_obj, svc=None):
+ attempts = 0
+ deadline = 0
+ retry = True
+ timeout = config.CONF.kubernetes.watch_retry_timeout
+ while retry:
+ try:
+ if attempts == 1:
+ deadline = time.time() + timeout
+ if (attempts > 0 and
+ utils.exponential_sleep(deadline, attempts) == 0):
+ LOG.error("Failed releasing lbaas '%s': deadline exceeded",
+ lb_obj.name)
+ return
+ self._drv_lbaas.release_loadbalancer(lb_obj)
+ retry = False
+ except k_exc.ResourceNotReady:
+ LOG.debug("Attempt (%s) of loadbalancer release %s failed."
+ " A retry will be triggered.", attempts,
+ lb_obj.name)
+ attempts += 1
+ retry = True
+ if svc:
+ endpoints_link = utils.get_endpoints_link(svc)
+ k8s = clients.get_kubernetes_client()
+ try:
+ endpoints = k8s.get(endpoints_link)
+ except k_exc.K8sResourceNotFound:
+ LOG.debug("Endpoint not Found.")
+ return
+
+ lbaas = utils.get_lbaas_state(endpoints)
+ if lbaas:
+ lbaas.loadbalancer = None
+ lbaas.pools = []
+ lbaas.listeners = []
+ lbaas.members = []
+ # NOTE(ltomasbo): give some extra time to ensure the Load
+ # Balancer VIP is also released
+ time.sleep(1)
+ utils.set_lbaas_state(endpoints, lbaas)
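
A note on the handler above: every status mutation is followed by the
same patch_crd('status', ...) call with identical exception handling. A
possible consolidation (hypothetical helper, not part of this patch)
could look like:

    def _patch_status(self, loadbalancer_crd):
        # Push the in-memory status back into the KuryrLoadBalancer CRD.
        kubernetes = clients.get_kubernetes_client()
        try:
            kubernetes.patch_crd('status',
                                 loadbalancer_crd['metadata']['selfLink'],
                                 loadbalancer_crd['status'])
        except k_exc.K8sResourceNotFound:
            LOG.debug('KuryrLoadbalancer CRD not found %s',
                      loadbalancer_crd)
        except k_exc.K8sClientException:
            LOG.exception('Error updating KuryrLoadbalancer CRD %s',
                          loadbalancer_crd)
            raise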
diff --git a/kuryr_kubernetes/controller/handlers/pod_label.py b/kuryr_kubernetes/controller/handlers/pod_label.py
index 6c3ff9860..9a234be47 100644
--- a/kuryr_kubernetes/controller/handlers/pod_label.py
+++ b/kuryr_kubernetes/controller/handlers/pod_label.py
@@ -47,26 +47,35 @@ def __init__(self):
self._drv_lbaas = drivers.LBaaSDriver.get_instance()
def on_present(self, pod):
- if driver_utils.is_host_network(pod) or not self._has_pod_state(pod):
+ if driver_utils.is_host_network(pod) or not self._has_vifs(pod):
# NOTE(ltomasbo): The event will be retried once the vif handler
# annotates the pod with the pod state.
return
- current_pod_labels = pod['metadata'].get('labels')
- previous_pod_labels = self._get_pod_labels(pod)
- LOG.debug("Got previous pod labels from annotation: %r",
- previous_pod_labels)
+ if (constants.K8S_ANNOTATION_VIF in
+ pod['metadata'].get('annotations', {})):
+ # NOTE(dulek): This might happen on upgrade, we need to wait for
+ # annotation to be moved to KuryrPort CRD.
+ return
+
+ current_pod_info = (pod['metadata'].get('labels'),
+ pod['status'].get('podIP'))
+ previous_pod_info = self._get_pod_info(pod)
+ LOG.debug("Got previous pod info from annotation: %r",
+ previous_pod_info)
- if current_pod_labels == previous_pod_labels:
+ if current_pod_info == previous_pod_info:
return
+ # FIXME(dulek): We should be able to just do create if only podIP
+ # changed, right?
crd_pod_selectors = self._drv_sg.update_sg_rules(pod)
project_id = self._drv_project.get_project(pod)
security_groups = self._drv_sg.get_security_groups(pod, project_id)
self._drv_vif_pool.update_vif_sgs(pod, security_groups)
try:
- self._set_pod_labels(pod, current_pod_labels)
+ self._set_pod_info(pod, current_pod_info)
except k_exc.K8sResourceNotFound:
LOG.debug("Pod already deleted, no need to retry.")
return
@@ -75,33 +84,37 @@ def on_present(self, pod):
services = driver_utils.get_services()
self._update_services(services, crd_pod_selectors, project_id)
- def _get_pod_labels(self, pod):
+ def _get_pod_info(self, pod):
try:
annotations = pod['metadata']['annotations']
pod_labels_annotation = annotations[constants.K8S_ANNOTATION_LABEL]
+ pod_ip_annotation = annotations[constants.K8S_ANNOTATION_IP]
except KeyError:
- return None
+ return None, None
pod_labels = jsonutils.loads(pod_labels_annotation)
- return pod_labels
+ return pod_labels, pod_ip_annotation
- def _set_pod_labels(self, pod, labels):
- if not labels:
- LOG.debug("Removing Label annotation: %r", labels)
- annotation = None
+ def _set_pod_info(self, pod, info):
+ if not info[0]:
+ LOG.debug("Removing info annotations: %r", info)
+ annotation = None, info[1]
else:
- annotation = jsonutils.dumps(labels, sort_keys=True)
- LOG.debug("Setting Labels annotation: %r", annotation)
+ annotation = jsonutils.dumps(info[0], sort_keys=True), info[1]
+ LOG.debug("Setting info annotations: %r", annotation)
k8s = clients.get_kubernetes_client()
k8s.annotate(pod['metadata']['selfLink'],
- {constants.K8S_ANNOTATION_LABEL: annotation},
+ {
+ constants.K8S_ANNOTATION_LABEL: annotation[0],
+ constants.K8S_ANNOTATION_IP: annotation[1]
+ },
resource_version=pod['metadata']['resourceVersion'])
- def _has_pod_state(self, pod):
+ def _has_vifs(self, pod):
try:
- pod_state = pod['metadata']['annotations'][
- constants.K8S_ANNOTATION_VIF]
- LOG.debug("Pod state is: %s", pod_state)
+ kp = driver_utils.get_vifs(pod)
+ vifs = kp['spec']['vifs']
+            LOG.debug("Pod has an associated KuryrPort with vifs: %s", vifs)
except KeyError:
return False
return True
@@ -111,6 +124,5 @@ def _update_services(self, services, crd_pod_selectors, project_id):
if not driver_utils.service_matches_affected_pods(
service, crd_pod_selectors):
continue
- sgs = self._drv_svc_sg.get_security_groups(service,
- project_id)
+ sgs = self._drv_svc_sg.get_security_groups(service, project_id)
self._drv_lbaas.update_lbaas_sg(service, sgs)
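
To make the pod_label changes concrete, a sketch of the tuple that
_set_pod_info serializes and _get_pod_info reads back (label and IP
values are made up; jsonutils and the annotation constants are the ones
used above):

    from oslo_serialization import jsonutils

    labels = {'app': 'demo'}   # pod['metadata']['labels']
    pod_ip = '10.0.0.5'        # pod['status']['podIP']

    # Stored under K8S_ANNOTATION_LABEL / K8S_ANNOTATION_IP respectively:
    annotation = jsonutils.dumps(labels, sort_keys=True), pod_ip
    # -> ('{"app": "demo"}', '10.0.0.5')

    # On the next on_present() the stored pair is compared against
    # (pod['metadata'].get('labels'), pod['status'].get('podIP')) to
    # detect label or IP changes.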
diff --git a/kuryr_kubernetes/controller/handlers/policy.py b/kuryr_kubernetes/controller/handlers/policy.py
index b916e3210..237b1c9fb 100644
--- a/kuryr_kubernetes/controller/handlers/policy.py
+++ b/kuryr_kubernetes/controller/handlers/policy.py
@@ -12,15 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from openstack import exceptions as os_exc
-from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging
from kuryr_kubernetes import clients
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes.controller.drivers import base as drivers
-from kuryr_kubernetes.controller.drivers import utils as driver_utils
-from kuryr_kubernetes import exceptions
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes import utils
@@ -36,99 +32,25 @@ class NetworkPolicyHandler(k8s_base.ResourceEventHandler):
def __init__(self):
super(NetworkPolicyHandler, self).__init__()
self._drv_policy = drivers.NetworkPolicyDriver.get_instance()
- self._drv_project = drivers.NetworkPolicyProjectDriver.get_instance()
- self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
- specific_driver='multi_pool')
- self._drv_vif_pool.set_vif_driver()
- self._drv_pod_sg = drivers.PodSecurityGroupsDriver.get_instance()
- self._drv_svc_sg = drivers.ServiceSecurityGroupsDriver.get_instance()
- self._drv_lbaas = drivers.LBaaSDriver.get_instance()
+ self.k8s = clients.get_kubernetes_client()
def on_present(self, policy):
LOG.debug("Created or updated: %s", policy)
- project_id = self._drv_project.get_project(policy)
- pods_to_update = []
- modified_pods = self._drv_policy.ensure_network_policy(policy,
- project_id)
- if modified_pods:
- pods_to_update.extend(modified_pods)
+ self._drv_policy.ensure_network_policy(policy)
- matched_pods = self._drv_policy.affected_pods(policy)
- pods_to_update.extend(matched_pods)
+ # Put finalizer in if it's not there already.
+ self.k8s.add_finalizer(policy, k_const.NETWORKPOLICY_FINALIZER)
- for pod in pods_to_update:
- if driver_utils.is_host_network(pod):
- continue
- pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
- self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
-
- if (pods_to_update and
- oslo_cfg.CONF.octavia_defaults.enforce_sg_rules and
- not self._is_egress_only_policy(policy)):
- # NOTE(ltomasbo): only need to change services if the pods that
- # they point to are updated
- services = driver_utils.get_services(
- policy['metadata']['namespace'])
- for service in services.get('items'):
- # TODO(ltomasbo): Skip other services that are not affected
- # by the policy
- if (not service['spec'].get('selector') or not
- self._is_service_affected(service, pods_to_update)):
- continue
- sgs = self._drv_svc_sg.get_security_groups(service,
- project_id)
- self._drv_lbaas.update_lbaas_sg(service, sgs)
-
- def on_deleted(self, policy):
- LOG.debug("Deleted network policy: %s", policy)
- project_id = self._drv_project.get_project(policy)
- pods_to_update = self._drv_policy.affected_pods(policy)
- netpolicy_crd = self._drv_policy.get_kuryrnetpolicy_crd(policy)
- if netpolicy_crd:
- crd_sg = netpolicy_crd['spec'].get('securityGroupId')
- for pod in pods_to_update:
- if driver_utils.is_host_network(pod):
- continue
- pod_sgs = self._drv_pod_sg.get_security_groups(pod,
- project_id)
- if crd_sg in pod_sgs:
- pod_sgs.remove(crd_sg)
- if not pod_sgs:
- pod_sgs = (
- oslo_cfg.CONF.neutron_defaults.pod_security_groups)
- if not pod_sgs:
- raise oslo_cfg.RequiredOptError(
- 'pod_security_groups',
- oslo_cfg.OptGroup('neutron_defaults'))
- try:
- self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
- except os_exc.NotFoundException:
- LOG.debug("Fail to update pod sgs."
- " Retrying policy deletion.")
- raise exceptions.ResourceNotReady(policy)
-
- # ensure ports at the pool don't have the NP sg associated
- net_id = self._get_policy_net_id(policy)
- self._drv_vif_pool.remove_sg_from_pools(crd_sg, net_id)
-
- self._drv_policy.release_network_policy(netpolicy_crd)
-
- if (oslo_cfg.CONF.octavia_defaults.enforce_sg_rules and
- not self._is_egress_only_policy(policy)):
- services = driver_utils.get_services(
- policy['metadata']['namespace'])
- for svc in services.get('items'):
- if (not svc['spec'].get('selector') or not
- self._is_service_affected(svc, pods_to_update)):
- continue
- sgs = self._drv_svc_sg.get_security_groups(svc,
- project_id)
- self._drv_lbaas.update_lbaas_sg(svc, sgs)
+ def on_finalize(self, policy):
+ LOG.debug("Finalizing policy %s", policy)
+ if not self._drv_policy.release_network_policy(policy):
+ # KNP was not found, so we need to finalize on our own.
+ self.k8s.remove_finalizer(policy, k_const.NETWORKPOLICY_FINALIZER)
def is_ready(self, quota):
- if not (utils.has_kuryr_crd(k_const.K8S_API_CRD_KURYRNETPOLICIES) and
- self._check_quota(quota)):
+ if not (utils.has_kuryr_crd(k_const.K8S_API_CRD_KURYRNETWORKPOLICIES)
+ and self._check_quota(quota)):
LOG.error("Marking NetworkPolicyHandler as not ready.")
return False
return True
@@ -137,29 +59,3 @@ def _check_quota(self, quota):
if utils.has_limit(quota.security_groups):
return utils.is_available('security_groups', quota.security_groups)
return True
-
- def _is_service_affected(self, service, affected_pods):
- svc_namespace = service['metadata']['namespace']
- svc_selector = service['spec'].get('selector')
- svc_pods = driver_utils.get_pods({'selector': svc_selector},
- svc_namespace).get('items')
- return any(pod in svc_pods for pod in affected_pods)
-
- def _get_policy_net_id(self, policy):
- policy_ns = policy['metadata']['namespace']
-
- kubernetes = clients.get_kubernetes_client()
- try:
- path = (f'{k_const.K8S_API_CRD_NAMESPACES}/{policy_ns}/'
- f'kuryrnetworks/{policy_ns}')
- net_crd = kubernetes.get(path)
- except exceptions.K8sClientException:
- LOG.exception("Kubernetes Client Exception.")
- raise
- return net_crd['status']['netId']
-
- def _is_egress_only_policy(self, policy):
- policy_types = policy['spec'].get('policyTypes', [])
- return (policy_types == ['Egress'] or
- (policy['spec'].get('egress') and
- not policy['spec'].get('ingress')))
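
For context on the finalizer-driven flow above: a finalizer is just an
entry in the object's metadata, so after on_present() a NetworkPolicy
would look roughly like this (the finalizer string is an assumption for
illustration; the real value is k_const.NETWORKPOLICY_FINALIZER):

    policy = {
        'apiVersion': 'networking.k8s.io/v1',
        'kind': 'NetworkPolicy',
        'metadata': {
            'name': 'deny-all',
            'finalizers': ['kuryr.openstack.org/networkpolicy-finalizer'],
        },
        'spec': {'podSelector': {}, 'policyTypes': ['Ingress']},
    }
    # Deleting the policy then only sets metadata.deletionTimestamp; the
    # object is kept until on_finalize() succeeds and the finalizer is
    # removed, which lets Kuryr release the security group first.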
diff --git a/kuryr_kubernetes/controller/handlers/vif.py b/kuryr_kubernetes/controller/handlers/vif.py
index 82aa06e5d..f6592be98 100644
--- a/kuryr_kubernetes/controller/handlers/vif.py
+++ b/kuryr_kubernetes/controller/handlers/vif.py
@@ -13,9 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import datetime
-
-from openstack import exceptions as os_exc
+from os_vif import objects
from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
@@ -24,13 +22,12 @@
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.controller.drivers import utils as driver_utils
-from kuryr_kubernetes.controller.managers import prometheus_exporter as exp
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.handlers import k8s_base
-from kuryr_kubernetes import objects
from kuryr_kubernetes import utils
LOG = logging.getLogger(__name__)
+KURYRPORT_URI = constants.K8S_API_CRD_NAMESPACES + '/{ns}/kuryrports/{crd}'
class VIFHandler(k8s_base.ResourceEventHandler):
@@ -66,20 +63,16 @@ def __init__(self):
drivers.ServiceSecurityGroupsDriver.get_instance())
def on_present(self, pod):
- state = driver_utils.get_pod_state(pod)
- if (self._is_pod_completed(pod)):
- if state:
+ if self._move_annotations_to_crd(pod):
+ return
+
+ kp = driver_utils.get_kuryrport(pod)
+ if self._is_pod_completed(pod):
+ if kp:
LOG.debug("Pod has completed execution, removing the vifs")
- self.on_deleted(pod)
- try:
- self._set_pod_state(pod, None)
- except k_exc.K8sClientException:
- LOG.exception("Could not clear pod annotation")
- raise k_exc.ResourceNotReady(pod['metadata']['name'])
- except k_exc.K8sResourceNotFound:
- pass
+ self.on_finalize(pod)
else:
- LOG.debug("Pod has completed execution, no annotation found."
+ LOG.debug("Pod has completed execution, no KuryrPort found."
" Skipping")
return
@@ -90,134 +83,31 @@ def on_present(self, pod):
# where certain pods/namespaces/nodes can be managed by other
# networking solutions/CNI drivers.
return
- LOG.debug("Got VIFs from annotation: %r", state)
- project_id = self._drv_project.get_project(pod)
- security_groups = self._drv_sg.get_security_groups(pod, project_id)
- if not state:
- try:
- subnets = self._drv_subnets.get_subnets(pod, project_id)
- except (os_exc.ResourceNotFound, k_exc.K8sResourceNotFound):
- LOG.warning("Subnet does not exists. If namespace driver is "
- "used, probably the namespace for the pod is "
- "already deleted. So this pod does not need to "
- "get a port as it will be deleted too. If the "
- "default subnet driver is used, then you must "
- "select an existing subnet to be used by Kuryr.")
- return
- # Request the default interface of pod
- main_vif = self._drv_vif_pool.request_vif(
- pod, project_id, subnets, security_groups)
-
- if not main_vif:
- pod_name = pod['metadata']['name']
- LOG.warning("Ignoring event due to pod %s not being "
- "scheduled yet.", pod_name)
- return
-
- state = objects.vif.PodState(default_vif=main_vif)
-
- # Request the additional interfaces from multiple dirvers
- additional_vifs = []
- for driver in self._drv_multi_vif:
- additional_vifs.extend(
- driver.request_additional_vifs(
- pod, project_id, security_groups))
- if additional_vifs:
- state.additional_vifs = {}
- for i, vif in enumerate(additional_vifs, start=1):
- k = (oslo_cfg.CONF.kubernetes.additional_ifname_prefix
- + str(i))
- state.additional_vifs[k] = vif
+ LOG.debug("Got KuryrPort: %r", kp)
+ if not kp:
try:
- self._set_pod_state(pod, state)
+ self._add_kuryrport_crd(pod)
except k_exc.K8sClientException as ex:
- LOG.debug("Failed to set annotation: %s", ex)
- # FIXME(ivc): improve granularity of K8sClient exceptions:
- # only resourceVersion conflict should be ignored
- for ifname, vif in state.vifs.items():
- self._drv_vif_pool.release_vif(pod, vif,
- project_id,
- security_groups)
- else:
- changed = False
- try:
- for ifname, vif in state.vifs.items():
- if (vif.plugin == constants.KURYR_VIF_TYPE_SRIOV and
- oslo_cfg.CONF.sriov.enable_node_annotations):
- driver_utils.update_port_pci_info(pod, vif)
- if not vif.active:
- try:
- self._drv_vif_pool.activate_vif(vif)
- changed = True
- except os_exc.ResourceNotFound:
- LOG.debug("Port not found, possibly already "
- "deleted. No need to activate it")
- finally:
- if changed:
- try:
- self._set_pod_state(pod, state)
- except k_exc.K8sResourceNotFound as ex:
- LOG.exception("Failed to set annotation: %s", ex)
- for ifname, vif in state.vifs.items():
- self._drv_vif_pool.release_vif(
- pod, vif, project_id,
- security_groups)
- except k_exc.K8sClientException:
- pod_name = pod['metadata']['name']
- raise k_exc.ResourceNotReady(pod_name)
- try:
- self._record_pod_creation_metric(pod)
- except Exception:
- LOG.debug("Failed to record metric for pod %s",
- pod['metadata']['name'])
- if self._is_network_policy_enabled():
- crd_pod_selectors = self._drv_sg.create_sg_rules(pod)
- if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules:
- services = driver_utils.get_services()
- self._update_services(
- services, crd_pod_selectors, project_id)
+ LOG.exception("Kubernetes Client Exception creating "
+ "KuryrPort CRD: %s", ex)
+ raise k_exc.ResourceNotReady(pod)
- def on_deleted(self, pod):
- if (driver_utils.is_host_network(pod) or
- not pod['spec'].get('nodeName')):
- return
+ k8s = clients.get_kubernetes_client()
+ k8s.add_finalizer(pod, constants.POD_FINALIZER)
- project_id = self._drv_project.get_project(pod)
- try:
- crd_pod_selectors = self._drv_sg.delete_sg_rules(pod)
- except k_exc.ResourceNotReady:
- # NOTE(ltomasbo): If the pod is being deleted before
- # kuryr-controller annotated any information about the port
- # associated, there is no need for deleting sg rules associated to
- # it. So this exception could be safetly ignored for the current
- # sg drivers. Only the NP driver associates rules to the pods ips,
- # and that waits for annotations to start.
- LOG.debug("Pod was not yet annotated by Kuryr-controller. "
- "Skipping SG rules deletion associated to the pod %s",
- pod)
- crd_pod_selectors = []
+ def on_finalize(self, pod):
+ k8s = clients.get_kubernetes_client()
try:
- security_groups = self._drv_sg.get_security_groups(pod, project_id)
- except k_exc.ResourceNotReady:
- # NOTE(ltomasbo): If the namespace object gets deleted first the
- # namespace security group driver will raise a ResourceNotReady
- # exception as it cannot access anymore the kuryrnetwork CRD
- # annotated on the namespace object. In such case we set security
- # groups to empty list so that if pools are enabled they will be
- # properly released.
- security_groups = []
+ k8s.delete(KURYRPORT_URI.format(ns=pod["metadata"]["namespace"],
+ crd=pod["metadata"]["name"]))
+ except k_exc.K8sResourceNotFound:
+ k8s.remove_finalizer(pod, constants.POD_FINALIZER)
- state = driver_utils.get_pod_state(pod)
- LOG.debug("Got VIFs from annotation: %r", state)
- if state:
- for ifname, vif in state.vifs.items():
- self._drv_vif_pool.release_vif(pod, vif, project_id,
- security_groups)
- if (self._is_network_policy_enabled() and crd_pod_selectors and
- oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
- services = driver_utils.get_services()
- self._update_services(services, crd_pod_selectors, project_id)
+ except k_exc.K8sClientException:
+ LOG.exception("Could not remove KuryrPort CRD for pod %s.",
+ pod['metadata']['name'])
+ raise k_exc.ResourceNotReady(pod['metadata']['name'])
def is_ready(self, quota):
if (utils.has_limit(quota.ports) and
@@ -244,42 +134,6 @@ def _is_pod_completed(pod):
except KeyError:
return False
- def _set_pod_state(self, pod, state):
- # TODO(ivc): extract annotation interactions
- if not state:
- old_annotation = pod['metadata'].get('annotations', {})
- LOG.debug("Removing VIFs annotation: %r for pod %s/%s (uid: %s)",
- old_annotation.get(constants.K8S_ANNOTATION_VIF),
- pod['metadata']['namespace'], pod['metadata']['name'],
- pod['metadata']['uid'])
- annotation = None
- else:
- state_dict = state.obj_to_primitive()
- annotation = jsonutils.dumps(state_dict, sort_keys=True)
- LOG.debug("Setting VIFs annotation: %r for pod %s/%s (uid: %s)",
- annotation, pod['metadata']['namespace'],
- pod['metadata']['name'], pod['metadata']['uid'])
-
- labels = pod['metadata'].get('labels')
- if not labels:
- LOG.debug("Removing Label annotation: %r", labels)
- labels_annotation = None
- else:
- labels_annotation = jsonutils.dumps(labels, sort_keys=True)
- LOG.debug("Setting Labels annotation: %r", labels_annotation)
-
- # NOTE(dulek): We don't care about compatibility with Queens format
- # here, as eventually all Kuryr services will be upgraded
- # and cluster will start working normally. Meanwhile
- # we just ignore issue of old services being unable to
- # read new annotations.
-
- k8s = clients.get_kubernetes_client()
- k8s.annotate(pod['metadata']['selfLink'],
- {constants.K8S_ANNOTATION_VIF: annotation,
- constants.K8S_ANNOTATION_LABEL: labels_annotation},
- resource_version=pod['metadata']['resourceVersion'])
-
def _update_services(self, services, crd_pod_selectors, project_id):
for service in services.get('items'):
if not driver_utils.service_matches_affected_pods(
@@ -294,13 +148,58 @@ def _is_network_policy_enabled(self):
svc_sg_driver = oslo_cfg.CONF.kubernetes.service_security_groups_driver
return ('policy' in enabled_handlers and svc_sg_driver == 'policy')
- def _record_pod_creation_metric(self, pod):
- exporter = exp.ControllerPrometheusExporter.get_instance()
- for condition in pod['status'].get('conditions'):
- if condition['type'] == 'PodScheduled' and condition['status']:
- f_str = "%Y-%m-%dT%H:%M:%SZ"
- time_obj = datetime.datetime.strptime(
- condition['lastTransitionTime'], f_str)
- pod_creation_time = datetime.datetime.now() - time_obj
- pod_creation_sec = (pod_creation_time).total_seconds()
- exporter.record_pod_creation_metric(pod_creation_sec)
+ def _add_kuryrport_crd(self, pod, vifs=None):
+ LOG.debug('Adding KuryrPort CRD %s', pod["metadata"]["name"])
+
+ if not vifs:
+ vifs = {}
+
+ kuryr_port = {
+ 'apiVersion': constants.K8S_API_CRD_VERSION,
+ 'kind': constants.K8S_OBJ_KURYRPORT,
+ 'metadata': {
+ 'name': pod['metadata']['name'],
+ 'finalizers': [constants.KURYRPORT_FINALIZER],
+ 'labels': {
+ constants.KURYRPORT_LABEL: pod['spec']['nodeName']
+ }
+ },
+ 'spec': {
+ 'podUid': pod['metadata']['uid'],
+ 'podNodeName': pod['spec']['nodeName'],
+ 'vifs': vifs
+ }
+ }
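+ # NOTE: KURYRPORT_LABEL carries the pod's node name, presumably so
+ # per-node consumers (e.g. the CNI daemon) can watch only their own
+ # KuryrPorts; podUid ties the CRD back to the exact pod instance.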
+
+ k8s = clients.get_kubernetes_client()
+ k8s.post(KURYRPORT_URI.format(ns=pod["metadata"]["namespace"],
+ crd=''), kuryr_port)
+
+ def _move_annotations_to_crd(self, pod):
+ """Support upgrade from annotations to KuryrPort CRD."""
+ try:
+ state = (pod['metadata']['annotations']
+ [constants.K8S_ANNOTATION_VIF])
+ except KeyError:
+ return False
+
+ _dict = jsonutils.loads(state)
+ state = objects.base.VersionedObject.obj_from_primitive(_dict)
+
+ vifs = {ifname: {'default': state.default_vif == vif,
+ 'vif': objects.base.VersionedObject
+ .obj_to_primitive(vif)}
+ for ifname, vif in state.vifs.items()}
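+ # Illustrative shape, assuming a pod with a single 'eth0' interface
+ # annotated in the old format:
+ # {'eth0': {'default': True,
+ # 'vif': {'versioned_object.name': 'VIFOpenVSwitch',
+ # 'versioned_object.data': {...}}}}
+ # Each VIF is stored as its oslo.versionedobjects primitive, keeping
+ # the KuryrPort spec plain JSON.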
+
+ try:
+ self._add_kuryrport_crd(pod, vifs)
+ except k_exc.K8sClientException as ex:
+ LOG.exception("Kubernetes Client Exception recreating "
+ "KuryrPort CRD from annotation: %s", ex)
+ raise k_exc.ResourceNotReady(pod)
+
+ k8s = clients.get_kubernetes_client()
+ k8s.remove_annotations(pod['metadata']['selfLink'],
+ constants.K8S_ANNOTATION_VIF)
+
+ return True
diff --git a/kuryr_kubernetes/k8s_client.py b/kuryr_kubernetes/k8s_client.py
index a228d8b07..3ea0a6cb0 100644
--- a/kuryr_kubernetes/k8s_client.py
+++ b/kuryr_kubernetes/k8s_client.py
@@ -293,6 +293,21 @@ def remove_finalizer(self, obj, finalizer):
# If after 3 iterations there's still conflict, just raise.
self._raise_from_response(response)
+ def get_loadbalancer_crd(self, obj):
+ name = obj['metadata']['name']
+ namespace = obj['metadata']['namespace']
+
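+ # Illustrative example: for a Service 'frontend' in namespace 'ns'
+ # this GETs <K8S_API_CRD_NAMESPACES>/ns/kuryrloadbalancers/frontend;
+ # a missing KuryrLoadBalancer CRD is reported as None, not an error.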
+ try:
+ crd = self.get('{}/{}/kuryrloadbalancers/{}'.format(
+ constants.K8S_API_CRD_NAMESPACES, namespace,
+ name))
+ except exc.K8sResourceNotFound:
+ return None
+ except exc.K8sClientException:
+ LOG.exception("Kubernetes Client Exception.")
+ raise
+ return crd
+
def annotate(self, path, annotations, resource_version=None):
"""Pushes a resource annotation to the K8s API resource
@@ -316,7 +331,7 @@ def annotate(self, path, annotations, resource_version=None):
headers=header, cert=self.cert,
verify=self.verify_server)
if response.ok:
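+ # The annotations key may legitimately be absent from the response
+ # (e.g. after remove_annotations() dropped the last one), hence the
+ # .get() below instead of direct indexing.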
- return response.json()['metadata']['annotations']
+ return response.json()['metadata'].get('annotations', {})
if response.status_code == requests.codes.conflict:
resource = self.get(path)
new_version = resource['metadata']['resourceVersion']
diff --git a/kuryr_kubernetes/tests/unit/cni/plugins/test_k8s_cni_registry.py b/kuryr_kubernetes/tests/unit/cni/plugins/test_k8s_cni_registry.py
index a4e32d617..20faed59a 100644
--- a/kuryr_kubernetes/tests/unit/cni/plugins/test_k8s_cni_registry.py
+++ b/kuryr_kubernetes/tests/unit/cni/plugins/test_k8s_cni_registry.py
@@ -29,10 +29,11 @@ def setUp(self):
self.k8s_mock = self.useFixture(kuryr_fixtures.MockK8sClient()).client
self.default_iface = 'baz'
self.additional_iface = 'eth1'
- self.pod = {'metadata': {'name': 'foo', 'uid': 'bar',
- 'namespace': 'default', 'selfLink': 'baz'}}
- self.vifs = fake._fake_vifs_dict()
- registry = {'default/foo': {'pod': self.pod, 'vifs': self.vifs,
+ self.kp = {'metadata': {'name': 'foo', 'uid': 'bar',
+ 'namespace': 'default', 'selfLink': 'baz'},
+ 'spec': {'podUid': 'bar'}}
+ self.vifs = fake._fake_vifs()
+ registry = {'default/foo': {'kp': self.kp, 'vifs': self.vifs,
'containerid': None,
'vif_unplugged': False,
'del_received': False}}
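+ # NOTE: registry entries are now keyed on a KuryrPort snapshot ('kp')
+ # rather than the pod itself; 'podUid' in its spec presumably lets
+ # the CNI daemon match requests to the right pod instance.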
@@ -46,7 +47,7 @@ def setUp(self):
@mock.patch('oslo_concurrency.lockutils.lock')
@mock.patch('kuryr_kubernetes.cni.binding.base.connect')
def test_add_present(self, m_connect, m_lock):
- self.k8s_mock.get.return_value = self.pod
+ self.k8s_mock.get.return_value = self.kp
self.plugin.add(self.params)
@@ -99,7 +100,7 @@ def test_remove_pod_from_registry_after_del(self, m_disconnect, m_lock):
@mock.patch('kuryr_kubernetes.cni.binding.base.disconnect')
def test_del_wrong_container_id(self, m_disconnect):
- registry = {'default/foo': {'pod': self.pod, 'vifs': self.vifs,
+ registry = {'default/foo': {'kp': self.kp, 'vifs': self.vifs,
'containerid': 'different'}}
healthy = mock.Mock()
self.plugin = k8s_cni_registry.K8sCNIRegistryPlugin(registry, healthy)
@@ -112,11 +113,11 @@ def test_del_wrong_container_id(self, m_disconnect):
@mock.patch('kuryr_kubernetes.cni.binding.base.connect')
def test_add_present_on_5_try(self, m_connect, m_lock):
se = [KeyError] * 5
- se.append({'pod': self.pod, 'vifs': self.vifs, 'containerid': None,
+ se.append({'kp': self.kp, 'vifs': self.vifs, 'containerid': None,
'vif_unplugged': False, 'del_received': False})
- se.append({'pod': self.pod, 'vifs': self.vifs, 'containerid': None,
+ se.append({'kp': self.kp, 'vifs': self.vifs, 'containerid': None,
'vif_unplugged': False, 'del_received': False})
- se.append({'pod': self.pod, 'vifs': self.vifs, 'containerid': None,
+ se.append({'kp': self.kp, 'vifs': self.vifs, 'containerid': None,
'vif_unplugged': False, 'del_received': False})
m_getitem = mock.Mock(side_effect=se)
m_setitem = mock.Mock()
@@ -127,7 +128,7 @@ def test_add_present_on_5_try(self, m_connect, m_lock):
m_lock.assert_called_with('default/foo', external=True)
m_setitem.assert_called_once_with('default/foo',
- {'pod': self.pod,
+ {'kp': self.kp,
'vifs': self.vifs,
'containerid': 'cont_id',
'vif_unplugged': False,
diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_lb_public_ip.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_lb_public_ip.py
index 38e6861c9..e64b25a16 100644
--- a/kuryr_kubernetes/tests/unit/controller/drivers/test_lb_public_ip.py
+++ b/kuryr_kubernetes/tests/unit/controller/drivers/test_lb_public_ip.py
@@ -53,10 +53,11 @@ def test_acquire_service_pub_ip_info_usr_specified_ip(self):
spec_type = 'LoadBalancer'
spec_lb_ip = '1.2.3.4'
- expected_resp = (obj_lbaas
- .LBaaSPubIp(ip_id=fip.id,
- ip_addr=fip.floating_ip_address,
- alloc_method='user'))
+ expected_resp = {
+ 'ip_id': fip.id,
+ 'ip_addr': fip.floating_ip_address,
+ 'alloc_method': 'user'
+ }
result = cls.acquire_service_pub_ip_info(m_driver, spec_type,
spec_lb_ip, project_id)
@@ -134,9 +135,11 @@ def test_acquire_service_pub_ip_info_pool_subnet_is_none(self, m_cfg):
spec_type = 'LoadBalancer'
spec_lb_ip = None
- expected_resp = obj_lbaas.LBaaSPubIp(ip_id=fip.id,
- ip_addr=fip.floating_ip_address,
- alloc_method='pool')
+ expected_resp = {
+ 'ip_id': fip.id,
+ 'ip_addr': fip.floating_ip_address,
+ 'alloc_method': 'pool'
+ }
result = cls.acquire_service_pub_ip_info(m_driver, spec_type,
spec_lb_ip, project_id)
@@ -161,9 +164,11 @@ def test_acquire_service_pub_ip_info_alloc_from_pool(self, m_cfg):
spec_type = 'LoadBalancer'
spec_lb_ip = None
- expected_resp = obj_lbaas.LBaaSPubIp(ip_id=fip.id,
- ip_addr=fip.floating_ip_address,
- alloc_method='pool')
+ expected_resp = {
+ 'ip_id': fip.id,
+ 'ip_addr': fip.floating_ip_address,
+ 'alloc_method': 'pool'
+ }
result = cls.acquire_service_pub_ip_info(m_driver, spec_type,
spec_lb_ip, project_id)
@@ -184,10 +189,11 @@ def test_release_pub_ip_alloc_method_non_pool(self):
fip = munch.Munch({'floating_ip_address': '1.2.3.5',
'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'})
- service_pub_ip_info = (obj_lbaas
- .LBaaSPubIp(ip_id=fip.id,
- ip_addr=fip.floating_ip_address,
- alloc_method='kk'))
+ service_pub_ip_info = {
+ 'ip_id': fip.id,
+ 'ip_addr': fip.floating_ip_address,
+ 'alloc_method': 'kk'
+ }
rc = cls.release_pub_ip(m_driver, service_pub_ip_info)
self.assertIs(rc, True)
@@ -199,10 +205,12 @@ def test_release_pub_ip_alloc_method_user(self):
fip = munch.Munch({'floating_ip_address': '1.2.3.5',
'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'})
- service_pub_ip_info = (obj_lbaas
- .LBaaSPubIp(ip_id=fip.id,
- ip_addr=fip.floating_ip_address,
- alloc_method='user'))
+ service_pub_ip_info = {
+ 'ip_id': fip.id,
+ 'ip_addr': fip.floating_ip_address,
+ 'alloc_method': 'user'
+ }
+
rc = cls.release_pub_ip(m_driver, service_pub_ip_info)
self.assertIs(rc, True)
@@ -216,10 +224,11 @@ def test_release_pub_ip_alloc_method_pool_neutron_exception(self):
fip = munch.Munch({'floating_ip_address': '1.2.3.5',
'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'})
- service_pub_ip_info = (obj_lbaas
- .LBaaSPubIp(ip_id=fip.id,
- ip_addr=fip.floating_ip_address,
- alloc_method='pool'))
+ service_pub_ip_info = {
+ 'ip_id': fip.id,
+ 'ip_addr': fip.floating_ip_address,
+ 'alloc_method': 'pool'
+ }
rc = cls.release_pub_ip(m_driver, service_pub_ip_info)
self.assertIs(rc, False)
@@ -232,10 +241,11 @@ def test_release_pub_ip_alloc_method_pool_neutron_succeeded(self):
fip = munch.Munch({'floating_ip_address': '1.2.3.5',
'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'})
- service_pub_ip_info = (obj_lbaas
- .LBaaSPubIp(ip_id=fip.id,
- ip_addr=fip.floating_ip_address,
- alloc_method='pool'))
+ service_pub_ip_info = {
+ 'ip_id': fip.id,
+ 'ip_addr': fip.floating_ip_address,
+ 'alloc_method': 'pool'
+ }
rc = cls.release_pub_ip(m_driver, service_pub_ip_info)
self.assertIs(rc, True)
@@ -265,6 +275,11 @@ def test_associate_lb_fip_id_not_exist(self):
.LBaaSPubIp(ip_id=0,
ip_addr=fip.floating_ip_address,
alloc_method='pool'))
+ service_pub_ip_info = {
+ 'ip_id': 0,
+ 'ip_addr': fip.floating_ip_address,
+ 'alloc_method': 'pool'
+ }
vip_port_id = 'ec29d641-fec4-4f67-928a-124a76b3a777'
@@ -281,10 +296,12 @@ def test_associate_lb_fip_id_not_exist_neutron_exception(self):
fip = munch.Munch({'floating_ip_address': '1.2.3.5',
'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'})
- service_pub_ip_info = (obj_lbaas
- .LBaaSPubIp(ip_id=fip.id,
- ip_addr=fip.floating_ip_address,
- alloc_method='pool'))
+
+ service_pub_ip_info = {
+ 'ip_id': fip.id,
+ 'ip_addr': fip.floating_ip_address,
+ 'alloc_method': 'pool'
+ }
vip_port_id = 'ec29d641-fec4-4f67-928a-124a76b3a777'
self.assertRaises(os_exc.SDKException, cls.associate_pub_ip,
@@ -308,10 +325,11 @@ def test_disassociate_pub_ip_fip_id_not_exist(self):
os_net.update_floatingip.return_value = None
fip = munch.Munch({'floating_ip_address': '1.2.3.5',
'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'})
- service_pub_ip_info = (obj_lbaas
- .LBaaSPubIp(ip_id=0,
- ip_addr=fip.floating_ip_address,
- alloc_method='pool'))
+ service_pub_ip_info = {
+ 'ip_id': 0,
+ 'ip_addr': fip.floating_ip_address,
+ 'alloc_method': 'pool'
+ }
result = cls.disassociate_pub_ip(m_driver, service_pub_ip_info)
@@ -325,10 +343,12 @@ def test_disassociate_pub_ip_neutron_exception(self):
os_net.update_ip.side_effect = os_exc.SDKException
fip = munch.Munch({'floating_ip_address': '1.2.3.5',
'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'})
- service_pub_ip_info = (obj_lbaas
- .LBaaSPubIp(ip_id=fip.id,
- ip_addr=fip.floating_ip_address,
- alloc_method='pool'))
+
+ service_pub_ip_info = {
+ 'ip_id': fip.id,
+ 'ip_addr': fip.floating_ip_address,
+ 'alloc_method': 'pool'
+ }
self.assertRaises(os_exc.SDKException, cls.disassociate_pub_ip,
m_driver, service_pub_ip_info)
diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py
index a8e1af545..ec2d35cee 100644
--- a/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py
+++ b/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py
@@ -112,9 +112,11 @@ def test_ensure_loadbalancer(self):
os_net = self.useFixture(k_fix.MockNetworkClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
- expected_resp = obj_lbaas.LBaaSLoadBalancer(
- provider='octavia', port_id='D3FA400A-F543-4B91-9CD3-047AF0CE42E2',
- security_groups=[])
+ expected_resp = {
+ 'provider': 'octavia',
+ 'port_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42E2',
+ 'security_groups': []
+ }
project_id = 'TEST_PROJECT'
subnet_id = 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1'
ip = '1.2.3.4'
@@ -129,10 +131,10 @@ def test_ensure_loadbalancer(self):
m_driver._find_loadbalancer,
mock.ANY)
req = m_driver._ensure.call_args[0][2]
- self.assertEqual(lb_name, req.name)
- self.assertEqual(project_id, req.project_id)
- self.assertEqual(subnet_id, req.subnet_id)
- self.assertEqual(ip, str(req.ip))
+ self.assertEqual(lb_name, req['name'])
+ self.assertEqual(project_id, req['project_id'])
+ self.assertEqual(subnet_id, req['subnet_id'])
+ self.assertEqual(ip, str(req['ip']))
self.assertEqual(expected_resp, resp)
os_net.update_port.assert_not_called()
@@ -157,29 +159,38 @@ def test_cascade_release_loadbalancer(self):
lbaas.lbaas_loadbalancer_path = "boo %s"
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
- loadbalancer = mock.Mock()
+ loadbalancer = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'security_groups': [],
+ 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'provider': None
+ }
cls.release_loadbalancer(m_driver, loadbalancer)
m_driver._release.assert_called_once_with(
loadbalancer, loadbalancer, lbaas.delete_load_balancer,
- loadbalancer.id, cascade=True)
+ loadbalancer['id'], cascade=True)
def _test_ensure_listener(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
expected_resp = mock.sentinel.expected_resp
- name = 'TEST_NAME'
project_id = 'TEST_PROJECT'
- subnet_id = 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1'
- ip = '1.2.3.4'
loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
protocol = 'TCP'
- provider = 'amphora'
port = 1234
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- id=loadbalancer_id, name=name, project_id=project_id,
- subnet_id=subnet_id, ip=ip, provider=provider)
+ loadbalancer = {
+ 'name': 'TEST_NAME',
+ 'project_id': project_id,
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'provider': 'amphora'
+ }
# TODO(ivc): handle security groups
m_driver._ensure_provisioned.return_value = expected_resp
@@ -191,28 +202,27 @@ def _test_ensure_listener(self):
m_driver._find_listener, d_lbaasv2._LB_STS_POLL_SLOW_INTERVAL)
listener = m_driver._ensure_provisioned.call_args[0][1]
- self.assertEqual("%s:%s:%s" % (loadbalancer.name, protocol, port),
- listener.name)
- self.assertEqual(project_id, listener.project_id)
- self.assertEqual(loadbalancer_id, listener.loadbalancer_id)
- self.assertEqual(protocol, listener.protocol)
- self.assertEqual(port, listener.port)
+ self.assertEqual("%s:%s:%s" % (loadbalancer['name'], protocol, port),
+ listener['name'])
+ self.assertEqual(project_id, listener['project_id'])
+ self.assertEqual(loadbalancer_id, listener['loadbalancer_id'])
+ self.assertEqual(protocol, listener['protocol'])
+ self.assertEqual(port, listener['port'])
self.assertEqual(expected_resp, resp)
def test_ensure_listener_bad_request_exception(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
- name = 'TEST_NAME'
- project_id = 'TEST_PROJECT'
- subnet_id = 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1'
- ip = '1.2.3.4'
- loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
port = 1234
protocol = 'TCP'
- provider = 'amphora'
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- id=loadbalancer_id, name=name, project_id=project_id,
- subnet_id=subnet_id, ip=ip, provider=provider)
+ loadbalancer = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'provider': 'amphora'
+ }
m_driver._ensure_provisioned.side_effect = os_exc.BadRequestException
resp = cls.ensure_listener(m_driver, loadbalancer,
@@ -227,26 +237,43 @@ def test_release_listener(self):
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
m_driver._get_vip_port.return_value = munch.Munch({
'security_group_ids': [mock.sentinel.sg_id]})
- loadbalancer = mock.Mock()
- listener = mock.Mock()
+ loadbalancer = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'security_groups': [],
+ 'provider': 'amphora'
+ }
+ listener = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'protocol': 'TCP',
+ 'port': 1234,
+ 'id': 'A57B7771-6050-4CA8-A63C-443493EC98AB'
+ }
cls.release_listener(m_driver, loadbalancer, listener)
m_driver._release.assert_called_once_with(loadbalancer, listener,
lbaas.delete_listener,
- listener.id)
+ listener['id'])
def test_ensure_pool(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
expected_resp = mock.sentinel.expected_resp
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
- project_id='TEST_PROJECT')
- listener = obj_lbaas.LBaaSListener(
- id='A57B7771-6050-4CA8-A63C-443493EC98AB',
- name='TEST_LISTENER_NAME',
- protocol='TCP')
+ loadbalancer = {
+ 'project_id': 'TEST_PROJECT',
+ 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ }
+ listener = {
+ 'id': 'A57B7771-6050-4CA8-A63C-443493EC98AB',
+ 'name': 'TEST_LISTENER_NAME',
+ 'protocol': 'TCP',
+ }
m_driver._ensure_provisioned.return_value = expected_resp
resp = cls.ensure_pool(m_driver, loadbalancer, listener)
@@ -255,10 +282,10 @@ def test_ensure_pool(self):
loadbalancer, mock.ANY, m_driver._create_pool,
m_driver._find_pool)
pool = m_driver._ensure_provisioned.call_args[0][1]
- self.assertEqual(listener.name, pool.name)
- self.assertEqual(loadbalancer.project_id, pool.project_id)
- self.assertEqual(listener.id, pool.listener_id)
- self.assertEqual(listener.protocol, pool.protocol)
+ self.assertEqual(listener['name'], pool['name'])
+ self.assertEqual(loadbalancer['project_id'], pool['project_id'])
+ self.assertEqual(listener['id'], pool['listener_id'])
+ self.assertEqual(listener['protocol'], pool['protocol'])
self.assertEqual(expected_resp, resp)
def test_release_pool(self):
@@ -266,23 +293,34 @@ def test_release_pool(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
loadbalancer = mock.Mock()
- pool = mock.Mock()
+ pool = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'listener_id': 'A57B7771-6050-4CA8-A63C-443493EC98AB',
+ 'protocol': 'TCP',
+ 'id': 'D4F35594-27EB-4F4C-930C-31DD40F53B77'
+ }
cls.release_pool(m_driver, loadbalancer, pool)
m_driver._release.assert_called_once_with(loadbalancer, pool,
lbaas.delete_pool,
- pool.id)
+ pool['id'])
def test_ensure_member(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
expected_resp = mock.sentinel.expected_resp
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
- project_id='TEST_PROJECT')
- pool = obj_lbaas.LBaaSPool(project_id='TEST_PROJECT',
- id='D4F35594-27EB-4F4C-930C-31DD40F53B77')
+ loadbalancer = {
+ 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'project_id': 'TEST_PROJECT'
+ }
+ pool = {
+ 'id': 'D4F35594-27EB-4F4C-930C-31DD40F53B77',
+ 'project_id': 'TEST_PROJECT'
+ }
+
subnet_id = 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1'
ip = '1.2.3.4'
port = 1234
@@ -299,41 +337,63 @@ def test_ensure_member(self):
loadbalancer, mock.ANY, m_driver._create_member,
m_driver._find_member)
member = m_driver._ensure_provisioned.call_args[0][1]
- self.assertEqual("%s/%s:%s" % (namespace, name, port), member.name)
- self.assertEqual(pool.project_id, member.project_id)
- self.assertEqual(pool.id, member.pool_id)
- self.assertEqual(subnet_id, member.subnet_id)
- self.assertEqual(ip, str(member.ip))
- self.assertEqual(port, member.port)
+ self.assertEqual("%s/%s:%s" % (namespace, name, port), member['name'])
+ self.assertEqual(pool['project_id'], member['project_id'])
+ self.assertEqual(pool['id'], member['pool_id'])
+ self.assertEqual(subnet_id, member['subnet_id'])
+ self.assertEqual(ip, str(member['ip']))
+ self.assertEqual(port, member['port'])
self.assertEqual(expected_resp, resp)
def test_release_member(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
- loadbalancer = mock.Mock()
- member = mock.Mock()
+ loadbalancer = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'security_groups': [],
+ 'provider': None
+ }
+
+ member = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'pool_id': 'D4F35594-27EB-4F4C-930C-31DD40F53B77',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'port': 1234,
+ 'id': '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F'
+ }
cls.release_member(m_driver, loadbalancer, member)
m_driver._release.assert_called_once_with(loadbalancer, member,
lbaas.delete_member,
- member.id, member.pool_id)
+ member['id'],
+ member['pool_id'])
def test_create_loadbalancer(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
- subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
- security_groups=[])
+ loadbalancer = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'security_groups': [],
+ 'provider': None
+ }
+
loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
req = {
- 'name': loadbalancer.name,
- 'project_id': loadbalancer.project_id,
- 'vip_address': str(loadbalancer.ip),
- 'vip_subnet_id': loadbalancer.subnet_id,
+ 'name': loadbalancer['name'],
+ 'project_id': loadbalancer['project_id'],
+ 'vip_address': str(loadbalancer['ip']),
+ 'vip_subnet_id': loadbalancer['subnet_id'],
}
resp = o_lb.LoadBalancer(id=loadbalancer_id, provider='haproxy')
lbaas.create_load_balancer.return_value = resp
@@ -342,27 +402,28 @@ def test_create_loadbalancer(self):
ret = cls._create_loadbalancer(m_driver, loadbalancer)
lbaas.create_load_balancer.assert_called_once_with(**req)
- for attr in loadbalancer.obj_fields:
- self.assertEqual(getattr(loadbalancer, attr),
- getattr(ret, attr))
- self.assertEqual(loadbalancer_id, ret.id)
+ self.assertEqual(loadbalancer, ret)
+ self.assertEqual(loadbalancer_id, ret['id'])
def test_create_loadbalancer_provider_defined(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
- subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
- security_groups=[],
- provider='amphora')
+ loadbalancer = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'security_groups': [],
+ 'provider': 'amphora'
+ }
loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
req = {
- 'name': loadbalancer.name,
- 'project_id': loadbalancer.project_id,
- 'vip_address': str(loadbalancer.ip),
- 'vip_subnet_id': loadbalancer.subnet_id,
- 'provider': loadbalancer.provider,
+ 'name': loadbalancer['name'],
+ 'project_id': loadbalancer['project_id'],
+ 'vip_address': str(loadbalancer['ip']),
+ 'vip_subnet_id': loadbalancer['subnet_id'],
+ 'provider': loadbalancer['provider'],
}
resp = o_lb.LoadBalancer(id=loadbalancer_id, provider='amphora')
lbaas.create_load_balancer.return_value = resp
@@ -371,27 +432,28 @@ def test_create_loadbalancer_provider_defined(self):
ret = cls._create_loadbalancer(m_driver, loadbalancer)
lbaas.create_load_balancer.assert_called_once_with(**req)
- for attr in loadbalancer.obj_fields:
- self.assertEqual(getattr(loadbalancer, attr),
- getattr(ret, attr))
- self.assertEqual(loadbalancer_id, ret.id)
+ self.assertEqual(loadbalancer, ret)
+ self.assertEqual(loadbalancer_id, ret['id'])
def test_create_loadbalancer_provider_mismatch(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
- subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
- security_groups=[],
- provider='amphora')
+ loadbalancer = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'security_groups': [],
+ 'provider': 'amphora'
+ }
loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
req = {
- 'name': loadbalancer.name,
- 'project_id': loadbalancer.project_id,
- 'vip_address': str(loadbalancer.ip),
- 'vip_subnet_id': loadbalancer.subnet_id,
- 'provider': loadbalancer.provider,
+ 'name': loadbalancer['name'],
+ 'project_id': loadbalancer['project_id'],
+ 'vip_address': str(loadbalancer['ip']),
+ 'vip_subnet_id': loadbalancer['subnet_id'],
+ 'provider': loadbalancer['provider'],
}
resp = o_lb.LoadBalancer(id=loadbalancer_id, provider='haproxy')
lbaas.create_load_balancer.return_value = resp
@@ -406,10 +468,14 @@ def test_find_loadbalancer(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
- subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
- provider='haproxy', security_groups=[])
+ loadbalancer = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'security_groups': [],
+ 'provider': 'haproxy'
+ }
loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
resp = iter([o_lb.LoadBalancer(id=loadbalancer_id, provider='haproxy',
provisioning_status='ACTIVE')])
@@ -419,15 +485,13 @@ def test_find_loadbalancer(self):
ret = cls._find_loadbalancer(m_driver, loadbalancer)
lbaas.load_balancers.assert_called_once_with(
- name=loadbalancer.name,
- project_id=loadbalancer.project_id,
- vip_address=str(loadbalancer.ip),
- vip_subnet_id=loadbalancer.subnet_id,
+ name=loadbalancer['name'],
+ project_id=loadbalancer['project_id'],
+ vip_address=str(loadbalancer['ip']),
+ vip_subnet_id=loadbalancer['subnet_id'],
provider='haproxy')
- for attr in loadbalancer.obj_fields:
- self.assertEqual(getattr(loadbalancer, attr),
- getattr(ret, attr))
- self.assertEqual(loadbalancer_id, ret.id)
+ self.assertEqual(loadbalancer, ret)
+ self.assertEqual(loadbalancer_id, ret['id'])
m_driver.release_loadbalancer.assert_not_called()
def test_find_loadbalancer_not_found(self):
@@ -437,15 +501,19 @@ def test_find_loadbalancer_not_found(self):
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
- subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1')
+ loadbalancer = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'provider': None
+ }
resp = iter([])
lbaas.load_balancers.return_value = resp
ret = cls._find_loadbalancer(m_driver, loadbalancer)
lbaas.load_balancers.assert_called_once_with(
- name=loadbalancer.name,
- project_id=loadbalancer.project_id,
- vip_address=str(loadbalancer.ip),
- vip_subnet_id=loadbalancer.subnet_id,
+ name=loadbalancer['name'],
+ project_id=loadbalancer['project_id'],
+ vip_address=str(loadbalancer['ip']),
+ vip_subnet_id=loadbalancer['subnet_id'],
provider=None)
self.assertIsNone(ret)
m_driver.release_loadbalancer.assert_not_called()
@@ -454,9 +525,13 @@ def test_find_loadbalancer_error(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
- subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1')
+ loadbalancer = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'provider': None
+ }
loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
resp = iter([o_lb.LoadBalancer(id=loadbalancer_id, provider='haproxy',
provisioning_status='ERROR')])
@@ -466,10 +541,10 @@ def test_find_loadbalancer_error(self):
ret = cls._find_loadbalancer(m_driver, loadbalancer)
lbaas.load_balancers.assert_called_once_with(
- name=loadbalancer.name,
- project_id=loadbalancer.project_id,
- vip_address=str(loadbalancer.ip),
- vip_subnet_id=loadbalancer.subnet_id,
+ name=loadbalancer['name'],
+ project_id=loadbalancer['project_id'],
+ vip_address=str(loadbalancer['ip']),
+ vip_subnet_id=loadbalancer['subnet_id'],
provider=None)
self.assertIsNone(ret)
m_driver.release_loadbalancer.assert_called_once()
@@ -478,69 +553,80 @@ def test_create_listener(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
- listener = obj_lbaas.LBaaSListener(
- name='TEST_NAME', project_id='TEST_PROJECT', protocol='TCP',
- port=1234, loadbalancer_id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
+ listener = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'protocol': 'TCP',
+ 'port': 1234
+ }
listener_id = 'A57B7771-6050-4CA8-A63C-443493EC98AB'
+
req = {
- 'name': listener.name,
- 'project_id': listener.project_id,
- 'loadbalancer_id': listener.loadbalancer_id,
- 'protocol': listener.protocol,
- 'protocol_port': listener.port}
+ 'name': listener['name'],
+ 'project_id': listener['project_id'],
+ 'loadbalancer_id': listener['loadbalancer_id'],
+ 'protocol': listener['protocol'],
+ 'protocol_port': listener['port']}
resp = o_lis.Listener(id=listener_id)
lbaas.create_listener.return_value = resp
ret = cls._create_listener(m_driver, listener)
lbaas.create_listener.assert_called_once_with(**req)
- for attr in listener.obj_fields:
- self.assertEqual(getattr(listener, attr),
- getattr(ret, attr))
- self.assertEqual(listener_id, ret.id)
+ self.assertEqual(listener, ret)
+ self.assertEqual(listener_id, ret['id'])
def test_find_listener(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
- listener = obj_lbaas.LBaaSListener(
- name='TEST_NAME', project_id='TEST_PROJECT', protocol='TCP',
- port=1234, loadbalancer_id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
+ loadbalancer = {
+ 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ }
+ listener = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'protocol': 'TCP',
+ 'port': 1234
+ }
listener_id = 'A57B7771-6050-4CA8-A63C-443493EC98AB'
lbaas.listeners.return_value = iter([o_lis.Listener(id=listener_id)])
ret = cls._find_listener(m_driver, listener, loadbalancer)
lbaas.listeners.assert_called_once_with(
- name=listener.name,
- project_id=listener.project_id,
- load_balancer_id=listener.loadbalancer_id,
- protocol=listener.protocol,
- protocol_port=listener.port)
- for attr in listener.obj_fields:
- self.assertEqual(getattr(listener, attr),
- getattr(ret, attr))
- self.assertEqual(listener_id, ret.id)
+ name=listener['name'],
+ project_id=listener['project_id'],
+ load_balancer_id=listener['loadbalancer_id'],
+ protocol=listener['protocol'],
+ protocol_port=listener['port'])
+ self.assertEqual(listener, ret)
+ self.assertEqual(listener_id, ret['id'])
def test_find_listener_not_found(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
- listener = obj_lbaas.LBaaSListener(
- name='TEST_NAME', project_id='TEST_PROJECT', protocol='TCP',
- port=1234, loadbalancer_id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
+ loadbalancer = {
+ 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ }
+ listener = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'protocol': 'TCP',
+ 'port': 1234
+ }
resp = iter([])
lbaas.listeners.return_value = resp
ret = cls._find_listener(m_driver, listener, loadbalancer)
lbaas.listeners.assert_called_once_with(
- name=listener.name,
- project_id=listener.project_id,
- load_balancer_id=listener.loadbalancer_id,
- protocol=listener.protocol,
- protocol_port=listener.port)
+ name=listener['name'],
+ project_id=listener['project_id'],
+ load_balancer_id=listener['loadbalancer_id'],
+ protocol=listener['protocol'],
+ protocol_port=listener['port'])
self.assertIsNone(ret)
def test_create_pool(self):
@@ -548,44 +634,49 @@ def test_create_pool(self):
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
lb_algorithm = 'ROUND_ROBIN'
- pool = obj_lbaas.LBaaSPool(
- name='TEST_NAME', project_id='TEST_PROJECT', protocol='TCP',
- listener_id='A57B7771-6050-4CA8-A63C-443493EC98AB',
- loadbalancer_id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
+ pool = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'listener_id': 'A57B7771-6050-4CA8-A63C-443493EC98AB',
+ 'protocol': 'TCP'
+ }
pool_id = 'D4F35594-27EB-4F4C-930C-31DD40F53B77'
+
req = {
- 'name': pool.name,
- 'project_id': pool.project_id,
- 'listener_id': pool.listener_id,
- 'loadbalancer_id': pool.loadbalancer_id,
- 'protocol': pool.protocol,
+ 'name': pool['name'],
+ 'project_id': pool['project_id'],
+ 'listener_id': pool['listener_id'],
+ 'loadbalancer_id': pool['loadbalancer_id'],
+ 'protocol': pool['protocol'],
'lb_algorithm': lb_algorithm}
resp = o_pool.Pool(id=pool_id)
lbaas.create_pool.return_value = resp
ret = cls._create_pool(m_driver, pool)
lbaas.create_pool.assert_called_once_with(**req)
- for attr in pool.obj_fields:
- self.assertEqual(getattr(pool, attr),
- getattr(ret, attr))
- self.assertEqual(pool_id, ret.id)
+ self.assertEqual(pool, ret)
+ self.assertEqual(pool_id, ret['id'])
def test_create_pool_with_different_lb_algorithm(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
lb_algorithm = 'SOURCE_IP_PORT'
- pool = obj_lbaas.LBaaSPool(
- name='TEST_NAME', project_id='TEST_PROJECT', protocol='TCP',
- listener_id='A57B7771-6050-4CA8-A63C-443493EC98AB',
- loadbalancer_id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
+ pool = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'listener_id': 'A57B7771-6050-4CA8-A63C-443493EC98AB',
+ 'protocol': 'TCP'
+ }
pool_id = 'D4F35594-27EB-4F4C-930C-31DD40F53B77'
req = {
- 'name': pool.name,
- 'project_id': pool.project_id,
- 'listener_id': pool.listener_id,
- 'loadbalancer_id': pool.loadbalancer_id,
- 'protocol': pool.protocol,
+ 'name': pool['name'],
+ 'project_id': pool['project_id'],
+ 'listener_id': pool['listener_id'],
+ 'loadbalancer_id': pool['loadbalancer_id'],
+ 'protocol': pool['protocol'],
'lb_algorithm': lb_algorithm}
resp = o_pool.Pool(id=pool_id)
lbaas.create_pool.return_value = resp
@@ -596,26 +687,27 @@ def test_create_pool_with_different_lb_algorithm(self):
ret = cls._create_pool(m_driver, pool)
lbaas.create_pool.assert_called_once_with(**req)
- for attr in pool.obj_fields:
- self.assertEqual(getattr(pool, attr),
- getattr(ret, attr))
- self.assertEqual(pool_id, ret.id)
+ self.assertEqual(pool, ret)
+ self.assertEqual(pool_id, ret['id'])
def test_create_pool_conflict(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
lb_algorithm = 'ROUND_ROBIN'
- pool = obj_lbaas.LBaaSPool(
- name='TEST_NAME', project_id='TEST_PROJECT', protocol='TCP',
- listener_id='A57B7771-6050-4CA8-A63C-443493EC98AB',
- loadbalancer_id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
+ pool = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'listener_id': 'A57B7771-6050-4CA8-A63C-443493EC98AB',
+ 'protocol': 'TCP'
+ }
req = {
- 'name': pool.name,
- 'project_id': pool.project_id,
- 'listener_id': pool.listener_id,
- 'loadbalancer_id': pool.loadbalancer_id,
- 'protocol': pool.protocol,
+ 'name': pool['name'],
+ 'project_id': pool['project_id'],
+ 'listener_id': pool['listener_id'],
+ 'loadbalancer_id': pool['loadbalancer_id'],
+ 'protocol': pool['protocol'],
'lb_algorithm': lb_algorithm}
lbaas.create_pool.side_effect = os_exc.BadRequestException
@@ -627,120 +719,134 @@ def test_find_pool_by_listener(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
- pool = obj_lbaas.LBaaSPool(
- name='TEST_NAME', project_id='TEST_PROJECT', protocol='TCP',
- listener_id='A57B7771-6050-4CA8-A63C-443493EC98AB',
- loadbalancer_id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
+ loadbalancer = {
+ 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ }
+ pool = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'listener_id': 'A57B7771-6050-4CA8-A63C-443493EC98AB',
+ 'protocol': 'TCP'
+ }
pool_id = 'D4F35594-27EB-4F4C-930C-31DD40F53B77'
resp = [o_pool.Pool(id=pool_id,
- listeners=[{"id": pool.listener_id}])]
+ listeners=[{"id": pool['listener_id']}])]
lbaas.pools.return_value = resp
ret = cls._find_pool(m_driver, pool, loadbalancer)
lbaas.pools.assert_called_once_with(
- name=pool.name,
- project_id=pool.project_id,
- loadbalancer_id=pool.loadbalancer_id,
- protocol=pool.protocol)
- for attr in pool.obj_fields:
- self.assertEqual(getattr(pool, attr),
- getattr(ret, attr))
- self.assertEqual(pool_id, ret.id)
+ name=pool['name'],
+ project_id=pool['project_id'],
+ loadbalancer_id=pool['loadbalancer_id'],
+ protocol=pool['protocol'])
+ self.assertEqual(pool, ret)
+ self.assertEqual(pool_id, ret['id'])
def test_find_pool_by_listener_not_found(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
- loadbalancer = obj_lbaas.LBaaSLoadBalancer(
- id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
- pool = obj_lbaas.LBaaSPool(
- name='TEST_NAME', project_id='TEST_PROJECT', protocol='TCP',
- listener_id='A57B7771-6050-4CA8-A63C-443493EC98AB',
- loadbalancer_id='00EE9E11-91C2-41CF-8FD4-7970579E5C4C')
+ loadbalancer = {
+ 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
+ }
+ pool = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'loadbalancer_id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C',
+ 'listener_id': 'A57B7771-6050-4CA8-A63C-443493EC98AB',
+ 'protocol': 'TCP'
+ }
resp = []
lbaas.pools.return_value = resp
ret = cls._find_pool(m_driver, pool, loadbalancer)
lbaas.pools.assert_called_once_with(
- name=pool.name,
- project_id=pool.project_id,
- loadbalancer_id=pool.loadbalancer_id,
- protocol=pool.protocol)
+ name=pool['name'],
+ project_id=pool['project_id'],
+ loadbalancer_id=pool['loadbalancer_id'],
+ protocol=pool['protocol'])
self.assertIsNone(ret)
def test_create_member(self):
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
- member = obj_lbaas.LBaaSMember(
- name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
- port=1234, subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
- pool_id='D4F35594-27EB-4F4C-930C-31DD40F53B77')
+ member = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'pool_id': 'D4F35594-27EB-4F4C-930C-31DD40F53B77',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'port': 1234
+ }
member_id = '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F'
req = {
- 'name': member.name,
- 'project_id': member.project_id,
- 'subnet_id': member.subnet_id,
- 'address': str(member.ip),
- 'protocol_port': member.port}
+ 'name': member['name'],
+ 'project_id': member['project_id'],
+ 'subnet_id': member['subnet_id'],
+ 'address': str(member['ip']),
+ 'protocol_port': member['port']}
resp = o_mem.Member(id=member_id)
lbaas.create_member.return_value = resp
ret = cls._create_member(m_driver, member)
- lbaas.create_member.assert_called_once_with(member.pool_id, **req)
- for attr in member.obj_fields:
- self.assertEqual(getattr(member, attr),
- getattr(ret, attr))
- self.assertEqual(member_id, ret.id)
+ lbaas.create_member.assert_called_once_with(member['pool_id'], **req)
+ self.assertEqual(member, ret)
+ self.assertEqual(member_id, ret['id'])
def test_find_member(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
loadbalancer = obj_lbaas.LBaaSLoadBalancer()
- member = obj_lbaas.LBaaSMember(
- name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
- port=1234, subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
- pool_id='D4F35594-27EB-4F4C-930C-31DD40F53B77')
+ member = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'pool_id': 'D4F35594-27EB-4F4C-930C-31DD40F53B77',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'port': 1234
+ }
member_id = '3A70CEC0-392D-4BC1-A27C-06E63A0FD54F'
resp = iter([o_mem.Member(id=member_id)])
lbaas.members.return_value = resp
ret = cls._find_member(m_driver, member, loadbalancer)
lbaas.members.assert_called_once_with(
- member.pool_id,
- name=member.name,
- project_id=member.project_id,
- subnet_id=member.subnet_id,
- address=member.ip,
- protocol_port=member.port)
- for attr in member.obj_fields:
- self.assertEqual(getattr(member, attr),
- getattr(ret, attr))
- self.assertEqual(member_id, ret.id)
+ member['pool_id'],
+ name=member['name'],
+ project_id=member['project_id'],
+ subnet_id=member['subnet_id'],
+ address=member['ip'],
+ protocol_port=member['port'])
+ self.assertEqual(member, ret)
+ self.assertEqual(member_id, ret['id'])
def test_find_member_not_found(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
loadbalancer = obj_lbaas.LBaaSLoadBalancer()
- member = obj_lbaas.LBaaSMember(
- name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
- port=1234, subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
- pool_id='D4F35594-27EB-4F4C-930C-31DD40F53B77')
+ member = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'pool_id': 'D4F35594-27EB-4F4C-930C-31DD40F53B77',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'port': 1234
+ }
resp = iter([])
lbaas.members.return_value = resp
ret = cls._find_member(m_driver, member, loadbalancer)
lbaas.members.assert_called_once_with(
- member.pool_id,
- name=member.name,
- project_id=member.project_id,
- subnet_id=member.subnet_id,
- address=member.ip,
- protocol_port=member.port)
+ member['pool_id'],
+ name=member['name'],
+ project_id=member['project_id'],
+ subnet_id=member['subnet_id'],
+ address=member['ip'],
+ protocol_port=member['port'])
self.assertIsNone(ret)
def test_ensure(self):
@@ -888,7 +994,14 @@ def test_wait_for_provisioning(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
- loadbalancer = mock.Mock()
+ loadbalancer = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'provider': None,
+ 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
+ }
timeout = mock.sentinel.timeout
timer = [mock.sentinel.t0, mock.sentinel.t1]
m_driver._provisioning_timer.return_value = timer
@@ -897,13 +1010,20 @@ def test_wait_for_provisioning(self):
cls._wait_for_provisioning(m_driver, loadbalancer, timeout)
- lbaas.get_load_balancer.assert_called_once_with(loadbalancer.id)
+ lbaas.get_load_balancer.assert_called_once_with(loadbalancer['id'])
def test_wait_for_provisioning_not_ready(self):
lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
cls = d_lbaasv2.LBaaSv2Driver
m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
- loadbalancer = mock.Mock()
+ loadbalancer = {
+ 'name': 'TEST_NAME',
+ 'project_id': 'TEST_PROJECT',
+ 'subnet_id': 'D3FA400A-F543-4B91-9CD3-047AF0CE42D1',
+ 'ip': '1.2.3.4',
+ 'provider': None,
+ 'id': '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
+ }
timeout = mock.sentinel.timeout
timer = [mock.sentinel.t0, mock.sentinel.t1]
m_driver._provisioning_timer.return_value = timer
diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py
index 81fd2e5f8..2232f5104 100644
--- a/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py
+++ b/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import munch
-from openstack import exceptions as os_exc
from unittest import mock
from kuryr_kubernetes.controller.drivers import network_policy
@@ -75,8 +73,8 @@ def setUp(self):
self._policy_uid = mock.sentinel.policy_uid
self._policy_link = mock.sentinel.policy_link
self._sg_id = mock.sentinel.sg_id
- self._i_rules = [{'security_group_rule': {'id': ''}}]
- self._e_rules = [{'security_group_rule': {'id': ''}}]
+ self._i_rules = [{'sgRule': {'id': ''}}]
+ self._e_rules = [{'sgRule': {'id': ''}}]
self._policy = {
'apiVersion': 'networking.k8s.io/v1',
@@ -104,12 +102,46 @@ def setUp(self):
[{'namespaceSelector': {
'matchLabels': {
'project': 'myproject'}}}]}],
- 'policyTypes': ['Ingress', 'Egress']
+ 'policyTypes': ['Ingress', 'Egress'],
+ 'podSelector': {},
}
}
- self._crd = {
- 'metadata': {'name': mock.sentinel.name,
+ self.crd = {
+ 'metadata': {'name': 'foobar',
+ 'namespace': 'default',
+ 'selfLink': mock.sentinel.selfLink},
+ 'spec': {
+ 'egressSgRules': [
+ {'sgRule':
+ {'description': 'Kuryr-Kubernetes NetPolicy SG rule',
+ 'direction': 'egress',
+ 'ethertype': 'IPv4',
+ 'port_range_max': 5978,
+ 'port_range_min': 5978,
+ 'protocol': 'tcp',
+ }}],
+ 'ingressSgRules': [
+ {'sgRule':
+ {'description': 'Kuryr-Kubernetes NetPolicy SG rule',
+ 'direction': 'ingress',
+ 'ethertype': 'IPv4',
+ 'port_range_max': 6379,
+ 'port_range_min': 6379,
+ 'protocol': 'tcp',
+ }}],
+ 'podSelector': {},
+ 'policyTypes': self._policy['spec']['policyTypes']
+ },
+ 'status': {
+ 'securityGroupId': self._sg_id,
+ 'securityGroupRules': [],
+ 'podSelector': {},
+ }
+ }
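+ # Shape note: in the new CRD the rules live in spec (egressSgRules /
+ # ingressSgRules, keyed 'sgRule') while the realized Neutron state
+ # (securityGroupId, securityGroupRules) moves to status; the old CRD
+ # kept everything under spec, as self.old_crd below shows.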
+
+ self.old_crd = {
+ 'metadata': {'name': 'np-foobar',
'namespace': 'default',
'selfLink': mock.sentinel.selfLink},
'spec': {
@@ -135,6 +167,7 @@ def setUp(self):
'security_group_id': self._sg_id,
'id': mock.sentinel.id
}}],
+ 'podSelector': {},
'networkpolicy_spec': self._policy['spec'],
'securityGroupId': self._sg_id,
'securityGroupName': mock.sentinel.sg_name}}
@@ -144,207 +177,57 @@ def setUp(self):
self._driver = network_policy.NetworkPolicyDriver()
@mock.patch.object(network_policy.NetworkPolicyDriver,
- 'get_kuryrnetpolicy_crd', return_value=False)
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'create_security_group_rules_from_network_policy')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'update_security_group_rules_from_network_policy')
- def test_ensure_network_policy(self, m_update, m_create, m_get_crd):
- self._driver.ensure_network_policy(self._policy, self._project_id)
-
- m_get_crd.assert_called_once_with(self._policy)
- m_create.assert_called_once_with(self._policy, self._project_id)
- m_update.assert_not_called()
-
- @mock.patch.object(network_policy.NetworkPolicyDriver, 'affected_pods')
- @mock.patch.object(network_policy.NetworkPolicyDriver, 'namespaced_pods')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'get_kuryrnetpolicy_crd', return_value=True)
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'create_security_group_rules_from_network_policy')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'update_security_group_rules_from_network_policy')
- def test_ensure_network_policy_with_existing_crd(
- self, m_update, m_create, m_get_crd, m_namespaced, m_affected):
- previous_selector = mock.sentinel.previous_selector
- m_update.return_value = previous_selector
- self._driver.ensure_network_policy(self._policy, self._project_id)
-
- m_get_crd.assert_called_once_with(self._policy)
- m_create.assert_not_called()
- m_update.assert_called_once_with(self._policy)
- m_affected.assert_called_once_with(self._policy, previous_selector)
- m_namespaced.assert_not_called()
-
- @mock.patch.object(network_policy.NetworkPolicyDriver, 'affected_pods')
- @mock.patch.object(network_policy.NetworkPolicyDriver, 'namespaced_pods')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'get_kuryrnetpolicy_crd', return_value=True)
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'create_security_group_rules_from_network_policy')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'update_security_group_rules_from_network_policy')
- def test_ensure_network_policy_with_existing_crd_no_selector(
- self, m_update, m_create, m_get_crd, m_namespaced, m_affected):
- m_update.return_value = None
- self._driver.ensure_network_policy(self._policy, self._project_id)
-
- m_get_crd.assert_called_once_with(self._policy)
- m_create.assert_not_called()
- m_update.assert_called_once_with(self._policy)
- m_affected.assert_not_called()
- m_namespaced.assert_called_once_with(self._policy)
-
- @mock.patch.object(network_policy.NetworkPolicyDriver, 'affected_pods')
- @mock.patch.object(network_policy.NetworkPolicyDriver, 'namespaced_pods')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'get_kuryrnetpolicy_crd')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'create_security_group_rules_from_network_policy')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'update_security_group_rules_from_network_policy')
- def test_ensure_network_policy_with_existing_crd_empty_selector(
- self, m_update, m_create, m_get_crd, m_namespaced, m_affected):
- previous_selector = {}
- pod_selector = {'matchLabels': {'run': 'demo'}}
- updated_policy = self._policy.copy()
- updated_policy['spec']['podSelector'] = pod_selector
- crd_with_empty_selector = self._crd.copy()
- crd_with_empty_selector['spec']['podSelector'] = previous_selector
-
- m_get_crd.return_value = crd_with_empty_selector
- m_update.return_value = previous_selector
-
- self._driver.ensure_network_policy(updated_policy, self._project_id)
-
- m_get_crd.assert_called_once_with(updated_policy)
- m_create.assert_not_called()
- m_update.assert_called_once_with(updated_policy)
- m_affected.assert_called_with(self._policy, previous_selector)
- m_namespaced.assert_not_called()
-
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- '_add_default_np_rules')
+ '_get_default_np_rules')
@mock.patch.object(network_policy.NetworkPolicyDriver,
- 'get_kuryrnetpolicy_crd')
+ '_get_knp_crd', return_value=False)
@mock.patch.object(network_policy.NetworkPolicyDriver,
- '_add_kuryrnetpolicy_crd')
+ '_create_knp_crd')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'parse_network_policy_rules')
@mock.patch.object(utils, 'get_subnet_cidr')
- def test_create_security_group_rules_from_network_policy(self, m_utils,
- m_parse,
- m_add_crd,
- m_get_crd,
- m_add_default):
- self._driver.os_net.create_security_group.return_value = (
- munch.Munch({'id': mock.sentinel.id,
- 'security_group_rules': []}))
+ def test_ensure_network_policy(self, m_utils, m_parse, m_add_crd,
+ m_get_crd, m_get_default):
m_utils.get_subnet_cidr.return_value = mock.sentinel.cidr
m_parse.return_value = (self._i_rules, self._e_rules)
- self._driver.os_net.create_security_group_rule.return_value = (
- munch.Munch({'id': mock.sentinel.id}))
- self._driver.create_security_group_rules_from_network_policy(
- self._policy, self._project_id)
+ self._driver.ensure_network_policy(
+ self._policy)
m_get_crd.assert_called_once()
m_add_crd.assert_called_once()
- m_add_default.assert_called_once()
+ m_get_default.assert_called_once()
@mock.patch.object(network_policy.NetworkPolicyDriver,
- '_add_default_np_rules')
+ '_get_default_np_rules')
@mock.patch.object(network_policy.NetworkPolicyDriver,
- 'get_kuryrnetpolicy_crd')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- '_add_kuryrnetpolicy_crd')
+ '_get_knp_crd')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'parse_network_policy_rules')
@mock.patch.object(utils, 'get_subnet_cidr')
- def test_create_security_group_rules_with_k8s_exc(self, m_utils, m_parse,
- m_add_crd, m_get_crd,
- m_add_default):
- self._driver.os_net.create_security_group.return_value = (
- munch.Munch({'id': mock.sentinel.id,
- 'security_group_rules': []}))
+ def test_ensure_network_policy_with_k8s_exc(self, m_utils, m_parse,
+ m_get_crd, m_get_default):
m_utils.get_subnet_cidr.return_value = mock.sentinel.cidr
m_parse.return_value = (self._i_rules, self._e_rules)
m_get_crd.side_effect = exceptions.K8sClientException
- self._driver.os_net.create_security_group_rule.return_value = (
- munch.Munch({'id': mock.sentinel.id}))
- self.assertRaises(
- exceptions.K8sClientException,
- self._driver.create_security_group_rules_from_network_policy,
- self._policy, self._project_id)
- m_add_crd.assert_called_once()
- m_add_default.assert_called_once()
+ self.assertRaises(exceptions.K8sClientException,
+ self._driver.ensure_network_policy, self._policy)
+ m_get_default.assert_called_once()
@mock.patch.object(network_policy.NetworkPolicyDriver,
- '_add_default_np_rules')
+ '_get_default_np_rules')
@mock.patch.object(network_policy.NetworkPolicyDriver,
- 'get_kuryrnetpolicy_crd')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- '_add_kuryrnetpolicy_crd')
+ '_get_knp_crd', return_value=None)
+ @mock.patch.object(network_policy.NetworkPolicyDriver, '_create_knp_crd')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'parse_network_policy_rules')
@mock.patch.object(utils, 'get_subnet_cidr')
- def test_create_security_group_rules_error_add_crd(self, m_utils, m_parse,
- m_add_crd, m_get_crd,
- m_add_default):
- self._driver.os_net.create_security_group.return_value = (
- munch.Munch({'id': mock.sentinel.id,
- 'security_group_rules': []}))
+ def test_ensure_network_policy_error_add_crd(
+ self, m_utils, m_parse, m_add_crd, m_get_crd, m_get_default):
m_utils.get_subnet_cidr.return_value = mock.sentinel.cidr
m_parse.return_value = (self._i_rules, self._e_rules)
m_add_crd.side_effect = exceptions.K8sClientException
- self._driver.os_net.create_security_group_rule.return_value = (
- munch.Munch({'id': mock.sentinel.id}))
- self.assertRaises(
- exceptions.K8sClientException,
- self._driver.create_security_group_rules_from_network_policy,
- self._policy, self._project_id)
- m_get_crd.assert_not_called()
- m_add_default.assert_called_once()
-
- def test_create_security_group_rules_with_n_exc(self):
- self._driver.os_net.create_security_group.side_effect = (
- os_exc.SDKException())
- self.assertRaises(
- os_exc.SDKException,
- self._driver.create_security_group_rules_from_network_policy,
- self._policy, self._project_id)
-
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'create_security_group_rule')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'get_kuryrnetpolicy_crd')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'parse_network_policy_rules')
- def test_update_security_group_rules(self, m_parse, m_get_crd,
- m_create_sgr):
- policy = self._policy.copy()
- policy['spec']['podSelector'] = {'matchLabels': {'test': 'test'}}
- m_get_crd.return_value = self._crd
- m_parse.return_value = (self._i_rules, self._e_rules)
- self._driver.update_security_group_rules_from_network_policy(
- policy)
- m_parse.assert_called_with(policy, self._sg_id)
-
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'create_security_group_rule')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'get_kuryrnetpolicy_crd')
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- 'parse_network_policy_rules')
- def test_update_security_group_rules_with_k8s_exc(self, m_parse, m_get_crd,
- m_create_sgr):
- self._driver.kubernetes.patch_crd.side_effect = (
- exceptions.K8sClientException())
- m_get_crd.return_value = self._crd
- m_parse.return_value = (self._i_rules, self._e_rules)
- self.assertRaises(
- exceptions.K8sClientException,
- self._driver.update_security_group_rules_from_network_policy,
- self._policy)
- m_parse.assert_called_with(self._policy, self._sg_id)
+ self.assertRaises(exceptions.K8sClientException,
+ self._driver.ensure_network_policy, self._policy)
+ m_get_crd.assert_called()
+ m_get_default.assert_called_once()
def test_get_namespaces(self):
namespace_selector = {'namespaceSelector': {
@@ -363,6 +246,13 @@ def test_get_namespaces_no_matches(self):
self.assertEqual([], resp)
self.kubernetes.get.assert_called_once()
+ def test_get_from_old_crd(self):
+ knp = self._driver.get_from_old_crd(self.old_crd)
+ self.assertEqual(self.crd['spec'], knp['spec'])
+ self.assertEqual(self.crd['status'], knp['status'])
+ for k in ['name', 'namespace']:
+ self.assertEqual(self.crd['metadata'][k], knp['metadata'][k])
+
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_get_resource_details')
@@ -377,7 +267,7 @@ def test_parse_network_policy_rules_with_rules(
namespace = 'myproject'
m_get_namespaces.return_value = [get_namespace_obj()]
m_get_resource_details.return_value = subnet_cidr, namespace
- self._driver.parse_network_policy_rules(self._policy, self._sg_id)
+ self._driver.parse_network_policy_rules(self._policy)
m_get_namespaces.assert_called()
m_get_resource_details.assert_called()
m_create.assert_called()
@@ -391,12 +281,12 @@ def test_parse_network_policy_rules_with_no_rules(self, m_create,
policy = self._policy.copy()
policy['spec']['ingress'] = [{}]
policy['spec']['egress'] = [{}]
- self._driver.parse_network_policy_rules(policy, self._sg_id)
+ self._driver.parse_network_policy_rules(policy)
m_get_ns.assert_not_called()
- calls = [mock.call(self._sg_id, 'ingress', ethertype='IPv4'),
- mock.call(self._sg_id, 'ingress', ethertype='IPv6'),
- mock.call(self._sg_id, 'egress', ethertype='IPv4'),
- mock.call(self._sg_id, 'egress', ethertype='IPv6')]
+ calls = [mock.call('ingress', ethertype='IPv4'),
+ mock.call('ingress', ethertype='IPv6'),
+ mock.call('egress', ethertype='IPv4'),
+ mock.call('egress', ethertype='IPv6')]
m_create.assert_has_calls(calls)
@mock.patch.object(network_policy.NetworkPolicyDriver,
@@ -408,7 +298,7 @@ def test_parse_network_policy_rules_with_no_pod_selector(
[{'port': 6379, 'protocol': 'TCP'}]}]
policy['spec']['egress'] = [{'ports':
[{'port': 6379, 'protocol': 'TCP'}]}]
- self._driver.parse_network_policy_rules(policy, self._sg_id)
+ self._driver.parse_network_policy_rules(policy)
m_create_all_pods_sg_rules.assert_called()
@mock.patch.object(network_policy.NetworkPolicyDriver,
@@ -429,7 +319,7 @@ def test_parse_network_policy_rules_with_ipblock(self,
'TCP'}],
'to': [{'ipBlock':
{'cidr': '10.0.0.0/24'}}]}]
- self._driver.parse_network_policy_rules(policy, self._sg_id)
+ self._driver.parse_network_policy_rules(policy)
m_create_sg_rule.assert_called()
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
@@ -450,38 +340,19 @@ def test_parse_network_policy_rules_with_no_ports(
selectors = {'namespaceSelector': {
'matchLabels': {
'project': 'myproject'}}}
- policy['spec']['egress'] = [
- {'to':
- [selectors]}]
- policy['spec']['ingress'] = [
- {'from':
- [selectors]}]
- selectors = {'namespace_selector': selectors['namespaceSelector']}
- self._driver.parse_network_policy_rules(policy, self._sg_id)
+ policy['spec']['egress'] = [{'to': [selectors]}]
+ policy['spec']['ingress'] = [{'from': [selectors]}]
+ self._driver.parse_network_policy_rules(policy)
m_get_namespaces.assert_called()
m_get_resource_details.assert_called()
- calls = [mock.call(self._sg_id, 'ingress', port_range_min=1,
+ calls = [mock.call('ingress', port_range_min=1,
port_range_max=65535, cidr=subnet_cidr,
namespace=namespace),
- mock.call(self._sg_id, 'egress', port_range_min=1,
+ mock.call('egress', port_range_min=1,
port_range_max=65535, cidr=subnet_cidr,
namespace=namespace)]
m_create.assert_has_calls(calls)
- def test_knps_on_namespace(self):
- self.kubernetes.get.return_value = {'items': ['not-empty']}
- namespace = 'test1'
-
- resp = self._driver.knps_on_namespace(namespace)
- self.assertTrue(resp)
-
- def test_knps_on_namespace_empty(self):
- self.kubernetes.get.return_value = {'items': []}
- namespace = 'test1'
-
- resp = self._driver.knps_on_namespace(namespace)
- self.assertFalse(resp)
-
@mock.patch.object(network_policy.NetworkPolicyDriver, 'namespaced_pods')
def test_affected_pods(self, m_namespaced):
self._driver.affected_pods(self._policy)
@@ -509,19 +380,10 @@ def test_namespaced_pods(self):
self.assertEqual([], resp)
@mock.patch.object(network_policy.NetworkPolicyDriver,
- '_del_kuryrnetpolicy_crd', return_value=False)
+ '_del_knp_crd', return_value=False)
def test_release_network_policy(self, m_del_crd):
- self._driver.release_network_policy(self._crd)
- self.neutron.delete_security_group.assert_called_once_with(
- self._crd['spec']['securityGroupId'])
- m_del_crd.assert_called_once_with(self._crd['metadata']['name'],
- self._crd['metadata']['namespace'])
-
- @mock.patch.object(network_policy.NetworkPolicyDriver,
- '_del_kuryrnetpolicy_crd', return_value=False)
- def test_release_network_policy_removed_crd(self, m_del_crd):
- self._driver.release_network_policy(None)
- m_del_crd.assert_not_called()
+ self._driver.release_network_policy(self.crd)
+ m_del_crd.assert_called_once_with(self.crd)
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_create_sg_rules_with_container_ports')
@@ -543,8 +405,7 @@ def test__create_sg_rule_body_on_text_port_ingress(self,
m_get_pods.return_value = {'items': [pod]}
m_get_ports.return_value = container_ports
- self._driver._create_sg_rule_body_on_text_port(self._sg_id,
- direction,
+ self._driver._create_sg_rule_body_on_text_port(direction,
port,
resources,
crd_rules,
@@ -577,8 +438,7 @@ def test__create_sg_rule_body_on_text_port_ingress_all(self,
m_get_pods.return_value = {'items': [pod]}
m_get_ports.return_value = container_ports
- self._driver._create_sg_rule_body_on_text_port(self._sg_id,
- direction,
+ self._driver._create_sg_rule_body_on_text_port(direction,
port,
resources,
crd_rules,
@@ -600,7 +460,7 @@ def test__create_sg_rule_body_on_text_port_ingress_match(self,
m_create_sgr):
def _create_sgr_cont(container_ports, allow_all, resource,
- matched_pods, crd_rules, sg_id, direction, port,
+ matched_pods, crd_rules, direction, port,
pod_selector=None, policy_namespace=None):
matched_pods[container_ports[0][1]] = 'foo'
@@ -617,8 +477,7 @@ def _create_sgr_cont(container_ports, allow_all, resource,
m_get_pods.return_value = {'items': [pod]}
m_get_ports.return_value = container_ports
- self._driver._create_sg_rule_body_on_text_port(self._sg_id,
- direction,
+ self._driver._create_sg_rule_body_on_text_port(direction,
port,
resources,
crd_rules,
@@ -629,7 +488,7 @@ def _create_sgr_cont(container_ports, allow_all, resource,
m_get_pods.assert_called_with(pod_selector, namespace)
m_get_ports.assert_called_with(pod, port)
- calls = [mock.call(self._sg_id, direction, container_ports[0][1],
+ calls = [mock.call(direction, container_ports[0][1],
protocol=port['protocol'], ethertype=e,
pods='foo') for e in ('IPv4', 'IPv6')]
@@ -656,8 +515,7 @@ def test__create_sg_rule_body_on_text_port_egress(self,
m_get_pods.return_value = {'items': [pod]}
m_get_ports.return_value = container_ports
- self._driver._create_sg_rule_body_on_text_port(self._sg_id,
- direction,
+ self._driver._create_sg_rule_body_on_text_port(direction,
port,
resources,
crd_rules,
@@ -685,8 +543,7 @@ def test__create_sg_rule_body_on_text_port_egress_all(self,
m_get_ports.return_value = container_ports
- self._driver._create_sg_rule_body_on_text_port(self._sg_id,
- direction,
+ self._driver._create_sg_rule_body_on_text_port(direction,
port,
resources,
crd_rules,
@@ -695,8 +552,8 @@ def test__create_sg_rule_body_on_text_port_egress_all(self,
allow_all=True)
m_get_ports.assert_called_with(resources[0], port)
- m_create_sgr.assert_called_once_with(self._sg_id, 'egress', None,
- cidr=mock.ANY, protocol='TCP')
+ m_create_sgr.assert_called_once_with('egress', None, cidr=mock.ANY,
+ protocol='TCP')
self.assertEqual(len(crd_rules), 1)
@mock.patch('kuryr_kubernetes.utils.get_subnet_cidr')
@@ -731,8 +588,7 @@ def _create_sgr_cont(container_ports, allow_all, resource,
m_get_pods.return_value = {'items': [pod]}
m_get_ports.return_value = container_ports
- self._driver._create_sg_rule_body_on_text_port(self._sg_id,
- direction,
+ self._driver._create_sg_rule_body_on_text_port(direction,
port,
resources,
crd_rules,
@@ -741,10 +597,10 @@ def _create_sgr_cont(container_ports, allow_all, resource,
allow_all=True)
m_get_ports.assert_called_with(resources[0], port)
- calls = [mock.call(self._sg_id, direction, container_ports[0][1],
+ calls = [mock.call(direction, container_ports[0][1],
protocol=port['protocol'], ethertype=e,
pods='foo') for e in ('IPv4', 'IPv6')]
- calls.append(mock.call(self._sg_id, direction, container_ports[0][1],
+ calls.append(mock.call(direction, container_ports[0][1],
protocol=port['protocol'],
cidr='10.0.0.128/26'))
m_create_sgr.assert_has_calls(calls)
@@ -758,19 +614,18 @@ def test__create_all_pods_sg_rules(self):
direction = 'ingress'
rules = []
- self._driver._create_all_pods_sg_rules(port, self._sg_id, direction,
- rules, '', None)
+ self._driver._create_all_pods_sg_rules(port, direction, rules, '',
+ None)
self.assertEqual(len(rules), 2)
def test__create_default_sg_rule(self):
for direction in ('ingress', 'egress'):
rules = []
- self._driver._create_default_sg_rule(self._sg_id, direction, rules)
+ self._driver._create_default_sg_rule(direction, rules)
self.assertEqual(len(rules), 2)
- self.assertListEqual(rules, [{'security_group_rule': {
+ self.assertListEqual(rules, [{'sgRule': {
'ethertype': e,
- 'security_group_id': self._sg_id,
'direction': direction,
'description': 'Kuryr-Kubernetes NetPolicy SG rule'
}} for e in ('IPv4', 'IPv6')])
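
The _create_default_sg_rule expectation above pins down the new rule format: rules embedded
in the KuryrNetworkPolicy CRD are keyed by 'sgRule' and no longer carry a
'security_group_id' (the group id moved into the CRD's status). A minimal sketch of a
builder producing such a body; the helper name and signature are hypothetical, the real
logic lives in the network_policy driver:

    def build_sg_rule_body(direction, port=None, protocol=None,
                           ethertype='IPv4', cidr=None):
        # Dict stored under ingressSgRules/egressSgRules in the
        # KuryrNetworkPolicy CRD. Note there is no 'security_group_id':
        # with the new CRD the group id lives in status.securityGroupId.
        rule = {
            'description': 'Kuryr-Kubernetes NetPolicy SG rule',
            'direction': direction,
            'ethertype': ethertype,
        }
        if protocol:
            rule['protocol'] = protocol
        if port:
            rule['port_range_min'] = rule['port_range_max'] = port
        if cidr:
            rule['remote_ip_prefix'] = cidr
        return {'sgRule': rule}

Called with defaults for both ethertypes, this yields exactly the two bodies asserted in
test__create_default_sg_rule above.
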
diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy_security_groups.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy_security_groups.py
index a9e1a116e..15e789a74 100644
--- a/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy_security_groups.py
+++ b/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy_security_groups.py
@@ -66,7 +66,7 @@ def get_sg_rule():
pod_ip = get_match_crd_pod_obj()['status'].get('podIP')
return {
"namespace": 'dev',
- "security_group_rule": {
+ "sgRule": {
"description": "Kuryr-Kubernetes NetPolicy SG rule",
"direction": "ingress",
"ethertype": "IPv4",
@@ -80,7 +80,7 @@ def get_sg_rule():
def get_matched_crd_obj():
return {
- "kind": "KuryrNetPolicy",
+ "kind": "KuryrNetworkPolicy",
"metadata": {"name": "np-test-network-policy",
"namespace": "default"},
"spec": {
@@ -159,7 +159,7 @@ def setUp(self):
'selfLink': mock.sentinel.selfLink},
'spec': {
'egressSgRules': [
- {'security_group_rule':
+ {'sgRule':
{'description': 'Kuryr-Kubernetes NetPolicy SG rule',
'direction': 'egress',
'ethertype': 'IPv4',
@@ -170,7 +170,7 @@ def setUp(self):
'id': mock.sentinel.id
}}],
'ingressSgRules': [
- {'security_group_rule':
+ {'sgRule':
{'description': 'Kuryr-Kubernetes NetPolicy SG rule',
'direction': 'ingress',
'ethertype': 'IPv4',
@@ -189,16 +189,18 @@ def setUp(self):
'production']}],
'matchLabels': {
'run': 'demo'
- }},
+ }}},
+ 'status': {
'securityGroupId': self._sg_id,
- 'securityGroupName': mock.sentinel.sg_name}}
+ },
+ }
self._crd2 = {
'metadata': {'name': mock.sentinel.name3,
'selfLink': mock.sentinel.selfLink},
'spec': {
'ingressSgRules': [
- {'security_group_rule':
+ {'sgRule':
{'description': 'Kuryr-Kubernetes NetPolicy SG rule',
'direction': 'ingress',
'ethertype': 'IPv4',
@@ -208,25 +210,14 @@ def setUp(self):
'security_group_id': self._sg_id2,
'id': mock.sentinel.id
}}],
- 'podSelector': {},
+ 'podSelector': {}},
+ 'status': {
'securityGroupId': self._sg_id2,
'securityGroupName': mock.sentinel.sg_name}}
- self._crds = {
- "apiVersion": "v1",
- "items": [self._crd],
- "kind": "List",
- "metadata": {
- "resourceVersion": "",
- "selfLink": mock.sentinel.selfLink}}
-
- self._multiple_crds = {
- "apiVersion": "v1",
- "items": [self._crd, self._crd2],
- "kind": "List",
- "metadata": {
- "resourceVersion": "",
- "selfLink": mock.sentinel.selfLink}}
+ self._crds = [self._crd]
+
+ self._multiple_crds = [self._crd, self._crd2]
self._pod = {
'apiVersion': 'v1',
@@ -304,7 +295,7 @@ def setUp(self):
self._crd_sg_id = mock.sentinel.crd_sg_id
self._sg_rule_body = {
- 'security_group_rule': {
+ 'sgRule': {
'direction': 'ingress',
'protocol': 'tcp',
'description': 'Kuryr-Kubernetes NetPolicy SG rule',
@@ -323,7 +314,7 @@ def setUp(self):
"spec": {
"egressSgRules": [],
"ingressSgRules": [{
- "security_group_rule": {
+ "sgRule": {
"description": "Kuryr-Kubernetes NetPolicy SG rule",
"direction": "ingress",
"ethertype": "IPv4",
@@ -348,20 +339,12 @@ def setUp(self):
"podSelector": {"matchLabels": {"app": "demo"}},
"securityGroupId": self._crd_sg_id}}
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'create_security_group_rule')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'create_security_group_rule_body')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
'match_selector', return_value=True)
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_ip')
def test__create_sg_rules(self, m_get_pod_ip,
- m_match_selector,
- m_create_sg_rule_body,
- m_create_sg_rule):
- m_create_sg_rule_body.return_value = self._sg_rule_body
+ m_match_selector):
sgr_id = mock.sentinel.sgr_id
- m_create_sg_rule.return_value = sgr_id
crd = get_crd_obj_with_all_selectors()
pod = get_match_crd_pod_obj()
m_get_pod_ip.return_value = pod['status'].get('podIP')
@@ -370,80 +353,58 @@ def test__create_sg_rules(self, m_get_pod_ip,
policy = crd['spec']['networkpolicy_spec']
rule_list = policy.get('ingress', None)
- crd_rules = crd['spec'].get('ingressSgRules')
pod_ns = pod['metadata']['namespace']
for rule_block in rule_list:
for rule in rule_block.get('from', []):
pod_selector = rule.get('podSelector')
matched = network_policy_security_groups._create_sg_rules(
- crd, pod, pod_selector, rule_block,
- crd_rules, 'ingress', matched, pod_ns)
+ crd, pod, pod_selector, rule_block, 'ingress', matched)
new_sg_rule['namespace'] = pod_ns
- new_sg_rule['security_group_rule']['id'] = sgr_id
+ new_sg_rule['sgRule']['id'] = sgr_id
m_match_selector.assert_called_once_with(
pod_selector, pod['metadata']['labels'])
m_get_pod_ip.assert_called_once_with(pod)
- m_create_sg_rule_body.assert_called_once()
- m_create_sg_rule.assert_called_once()
- self.assertEqual([new_sg_rule], crd_rules)
self.assertEqual(matched, True)
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
'get_pod_ip')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
'match_selector', return_value=False)
- def test__create_sg_rules_no_match(self, m_match_selector,
- m_get_pod_ip):
+ def test__create_sg_rules_no_match(self, m_match_selector, m_get_pod_ip):
crd = get_crd_obj_with_all_selectors()
pod = self._pod2
policy = crd['spec']['networkpolicy_spec']
rule_list = policy.get('ingress', None)
- crd_rules = crd['spec'].get('ingressSgRules')
for rule_block in rule_list:
for rule in rule_block.get('from', []):
pod_selector = rule.get('podSelector')
matched = network_policy_security_groups._create_sg_rules(
- crd, pod, pod_selector, rule_block,
- crd_rules, 'ingress', False, self._namespace)
+ crd, pod, pod_selector, rule_block, 'ingress', False)
self.assertEqual(matched, False)
+ @mock.patch('kuryr_kubernetes.controller.drivers.'
+ 'network_policy_security_groups._bump_networkpolicy')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'patch_kuryrnetworkpolicy_crd')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_kuryrnetpolicy_crds')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'delete_security_group_rule')
+ 'get_kuryrnetworkpolicy_crds')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_ip')
- def test_delete_sg_rules(self, m_get_pod_ip, m_delete_sg_rule,
- m_get_knp_crds, m_patch_kuryrnetworkpolicy_crd):
+ def test_delete_sg_rules(self, m_get_pod_ip, m_get_knp_crds, m_bump):
crd = self._crd_with_rule
- i_rule = crd['spec'].get('ingressSgRules')[0]
- sgr_id = i_rule['security_group_rule'].get('id')
m_get_pod_ip.return_value = self._pod_ip
- m_get_knp_crds.return_value = {
- "apiVersion": "v1",
- "items": [crd],
- "kind": "List",
- "metadata": {
- "resourceVersion": "",
- "selfLink": mock.sentinel.selfLink}}
- i_rules = e_rules = []
+ m_get_knp_crds.return_value = [crd]
pod = self._pod_dev_namespace
self._driver.delete_sg_rules(pod)
m_get_knp_crds.assert_called_once()
m_get_pod_ip.assert_called_once_with(pod)
- m_delete_sg_rule.assert_called_once_with(sgr_id)
- m_patch_kuryrnetworkpolicy_crd.assert_called_with(
- crd, i_rules, e_rules, crd['spec'].get('podSelector'))
+ m_bump.assert_called_once()
@mock.patch('kuryr_kubernetes.config.CONF')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_kuryrnetpolicy_crds')
+ 'get_kuryrnetworkpolicy_crds')
def test_get_sgs_for_pod_without_label(self, m_get_crds, m_cfg):
m_get_crds.return_value = self._crds
sg_list = [str(mock.sentinel.sg_id)]
@@ -460,7 +421,7 @@ def test_get_sgs_for_pod_without_label(self, m_get_crds, m_cfg):
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
'match_labels')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_kuryrnetpolicy_crds')
+ 'get_kuryrnetworkpolicy_crds')
def test_get_sgs_for_pod_with_label(self, m_get_crds, m_match_labels,
m_match_expressions):
m_get_crds.return_value = self._crds
@@ -474,7 +435,7 @@ def test_get_sgs_for_pod_with_label(self, m_get_crds, m_match_labels,
self._crd['spec']['podSelector']['matchExpressions'], pod_labels)
m_match_labels.assert_called_once_with(
self._crd['spec']['podSelector']['matchLabels'], pod_labels)
- self.assertEqual(resp, [str(self._sg_id)])
+ self.assertEqual(resp, [self._sg_id])
@mock.patch('kuryr_kubernetes.config.CONF')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
@@ -482,7 +443,7 @@ def test_get_sgs_for_pod_with_label(self, m_get_crds, m_match_labels,
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
'match_labels')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_kuryrnetpolicy_crds')
+ 'get_kuryrnetworkpolicy_crds')
def test_get_sgs_for_pod_with_label_no_match(self, m_get_crds,
m_match_labels,
m_match_expressions, m_cfg):
@@ -503,9 +464,9 @@ def test_get_sgs_for_pod_with_label_no_match(self, m_get_crds,
self.assertEqual(sg_list, sgs)
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_kuryrnetpolicy_crds')
+ 'get_kuryrnetworkpolicy_crds')
def test_get_sgs_no_crds(self, m_get_crds):
- m_get_crds.return_value = {"items": []}
+ m_get_crds.return_value = []
cfg.CONF.set_override('pod_security_groups', [],
group='neutron_defaults')
@@ -519,7 +480,7 @@ def test_get_sgs_no_crds(self, m_get_crds):
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
'match_labels')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_kuryrnetpolicy_crds')
+ 'get_kuryrnetworkpolicy_crds')
def test_get_sgs_multiple_crds(self, m_get_crds, m_match_labels,
m_match_expressions):
m_match_expressions.return_value = True
@@ -529,87 +490,64 @@ def test_get_sgs_multiple_crds(self, m_get_crds, m_match_labels,
resp = self._driver.get_security_groups(self._pod, self._project_id)
m_get_crds.assert_called_once_with(namespace=self._namespace)
- self.assertEqual([str(self._sg_id), str(self._sg_id2)], resp)
+ self.assertEqual([self._sg_id, self._sg_id2], resp)
+ @mock.patch('kuryr_kubernetes.controller.drivers.'
+ 'network_policy_security_groups._bump_networkpolicy')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'patch_kuryrnetworkpolicy_crd')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'delete_security_group_rule')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_kuryrnetpolicy_crds')
- def test_delete_namespace_sg_rule(self, m_get_knp_crd, m_delete_sg_rule,
- m_patch_kuryrnetworkpolicy_crd):
+ 'get_kuryrnetworkpolicy_crds')
+ def test_delete_namespace_sg_rule(self, m_get_knp_crd, m_bump):
cls = network_policy_security_groups.NetworkPolicySecurityGroupsDriver
m_driver = mock.MagicMock(spec=cls)
- i_rule = get_matched_crd_obj()['spec']['ingressSgRules'][0]
- sg_rule_id = i_rule.get('security_group_rule')['id']
- m_get_knp_crd.return_value = {"items": [get_matched_crd_obj()]}
+ m_get_knp_crd.return_value = [get_matched_crd_obj()]
cls.delete_namespace_sg_rules(m_driver, get_match_crd_namespace_obj())
m_get_knp_crd.assert_called_once()
- m_delete_sg_rule.assert_called_once_with(sg_rule_id)
- m_patch_kuryrnetworkpolicy_crd.assert_called_once()
+ m_bump.assert_called_once()
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'patch_kuryrnetworkpolicy_crd')
+ @mock.patch('kuryr_kubernetes.controller.drivers.'
+ 'network_policy_security_groups._bump_networkpolicy')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
'delete_security_group_rule')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_kuryrnetpolicy_crds')
- def test_delete_namespace_sg_rule_no_match(self, m_get_knp_crd,
- m_delete_sg_rule,
- m_patch_kuryrnetworkpolicy_crd):
+ 'get_kuryrnetworkpolicy_crds')
+ def test_delete_namespace_sg_rule_no_match(
+ self, m_get_knp_crd, m_delete_sg_rule, m_bump):
cls = network_policy_security_groups.NetworkPolicySecurityGroupsDriver
m_driver = mock.MagicMock(spec=cls)
- m_get_knp_crd.return_value = {"items": [get_matched_crd_obj()]}
+ m_get_knp_crd.return_value = [get_matched_crd_obj()]
cls.delete_namespace_sg_rules(m_driver,
get_no_match_crd_namespace_obj())
m_get_knp_crd.assert_called_once()
m_delete_sg_rule.assert_not_called()
- m_patch_kuryrnetworkpolicy_crd.assert_not_called()
+ m_bump.assert_not_called()
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_pods')
- @mock.patch('kuryr_kubernetes.controller.drivers.'
- 'network_policy_security_groups._create_sg_rule')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'match_selector')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_namespace_subnet_cidr')
- def test__parse_rules(self, m_get_ns_subnet_cidr, m_match_selector,
- m_create_sg_rule, m_get_pods):
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pods')
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.match_selector')
+ def test__parse_rules(self, m_match_selector, m_get_pods):
crd = get_crd_obj_no_match()
policy = crd['spec']['networkpolicy_spec']
i_rule = policy.get('ingress')[0]
ns_selector = i_rule['from'][0].get('namespaceSelector')
ns = get_match_crd_namespace_obj()
- m_get_ns_subnet_cidr.return_value = '10.0.2.0/26'
m_match_selector.return_value = True
- m_create_sg_rule.return_value = get_sg_rule()
- matched, rules = network_policy_security_groups._parse_rules(
- 'ingress', crd, namespace=ns)
+ matched = network_policy_security_groups._parse_rules(
+ 'ingress', crd, policy, namespace=ns)
- m_get_ns_subnet_cidr.assert_called_once_with(ns)
m_match_selector.assert_called_once_with(ns_selector,
ns['metadata']['labels'])
- m_create_sg_rule.assert_called_once()
self.assertEqual(matched, True)
- self.assertEqual(rules, [get_sg_rule()])
- @mock.patch('kuryr_kubernetes.controller.drivers.'
- 'network_policy_security_groups._create_sg_rule')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'match_selector')
- def test__parse_rules_no_match(self, m_match_selector,
- m_create_sg_rule):
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.match_selector')
+ def test__parse_rules_no_match(self, m_match_selector):
crd = get_crd_obj_no_match()
policy = crd['spec']['networkpolicy_spec']
i_rule = policy.get('ingress')[0]
@@ -618,26 +556,19 @@ def test__parse_rules_no_match(self, m_match_selector,
m_match_selector.return_value = False
- matched, rules = network_policy_security_groups._parse_rules(
- 'ingress', crd, namespace=ns)
+ matched = network_policy_security_groups._parse_rules(
+ 'ingress', crd, policy, namespace=ns)
m_match_selector.assert_called_once_with(ns_selector,
ns['metadata']['labels'])
- m_create_sg_rule.assert_not_called()
self.assertEqual(matched, False)
- self.assertEqual(rules, [])
- @mock.patch('kuryr_kubernetes.controller.drivers.'
- 'network_policy_security_groups._create_sg_rule')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_pod_ip')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_pods')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'match_selector')
- def test__parse_rules_all_selectors(self, m_match_selector, m_get_pods,
- m_get_pod_ip, m_create_sg_rule):
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pods')
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_ip')
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.match_selector')
+ def test__parse_rules_all_selectors(self, m_match_selector, m_get_pod_ip,
+ m_get_pods):
crd = get_crd_obj_with_all_selectors()
policy = crd['spec']['networkpolicy_spec']
i_rule = policy.get('ingress')[0]
@@ -647,22 +578,19 @@ def test__parse_rules_all_selectors(self, m_match_selector, m_get_pods,
pod = get_match_crd_pod_obj()
m_match_selector.return_value = True
- m_get_pods.return_value = {"items": [pod]}
m_get_pod_ip.return_value = pod['status']['podIP']
- m_create_sg_rule.return_value = get_sg_rule()
+ m_get_pods.return_value = {"items": [pod]}
- matched, rules = network_policy_security_groups._parse_rules(
- 'ingress', crd, namespace=ns)
+ matched = network_policy_security_groups._parse_rules(
+ 'ingress', crd, policy, namespace=ns)
m_match_selector.assert_called_once_with(ns_selector,
ns['metadata']['labels'])
m_get_pods.assert_called_once_with(pod_selector,
ns['metadata']['name'])
m_get_pod_ip.assert_called_once_with(pod)
- m_create_sg_rule.assert_called_once()
self.assertEqual(matched, True)
- self.assertEqual(rules, [get_sg_rule()])
@mock.patch('kuryr_kubernetes.controller.drivers.'
'network_policy_security_groups._parse_selectors_on_pod')
@@ -670,124 +598,26 @@ def test__parse_rules_multiple_selectors(self, m_parse_selectors_on_pod):
no_selector = None
matched_selector = True
pod = mock.sentinel.pod
- final_crd_rules = [mock.sentinel.crd_rules]
- m_parse_selectors_on_pod.side_effect = [
- (matched_selector, final_crd_rules)]*2
+ m_parse_selectors_on_pod.side_effect = [matched_selector]*2
- initial_crd_rules = []
direction = "ingress"
pod_selector = mock.sentinel.pod_selector
namespace_selector = mock.sentinel.namespace_selector
rule_block = {'from': [{'podSelector': pod_selector},
{'namespaceSelector': namespace_selector}]}
- crd = {"spec": {
- "ingressSgRules": initial_crd_rules,
- "networkpolicy_spec": {
- "ingress": [rule_block],
- "policyTypes": [
- "Ingress"
- ]}, }}
+ policy = {
+ "ingress": [rule_block],
+ "policyTypes": ["Ingress"]
+ }
+ crd = {"spec": {"ingressSgRules": []}}
- matched, rules = network_policy_security_groups._parse_rules(
- direction, crd, pod=pod)
+ matched = network_policy_security_groups._parse_rules(
+ direction, crd, policy, pod=pod)
calls = [mock.call(crd, pod, pod_selector, no_selector, rule_block,
- initial_crd_rules, direction, not matched_selector),
+ direction, not matched_selector),
mock.call(crd, pod, no_selector, namespace_selector,
- rule_block, final_crd_rules, direction,
- matched_selector)]
+ rule_block, direction, matched_selector)]
m_parse_selectors_on_pod.assert_has_calls(calls)
self.assertEqual(matched, matched_selector)
- self.assertEqual(rules, final_crd_rules)
-
-
-class TestNetworkPolicySecurityGroupsFunctions(test_base.TestCase):
-
- def setUp(self):
- super().setUp()
- self.kubernetes = self.useFixture(k_fix.MockK8sClient()).client
- self.npsg = network_policy_security_groups
- self.sg_id = mock.sentinel.sg_id
-
- self.crd = {
- 'spec': {
- 'ingressSgRules': [],
- 'networkpolicy_spec': {
- 'ingress': [],
- 'policyTypes': ['Ingress']
- }
- },
- 'metadata': {'namespace': 'ns'}
- }
-
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'create_security_group_rule')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'create_security_group_rule_body')
- def test__apply_sg_rules_on_matched_pods_empty_match(self, m_create_sgrb,
- m_create_sgr):
- self.npsg._apply_sg_rules_on_matched_pods({}, self.sg_id, 'ingress',
- 'ns', 'port', 'crd_rules')
-
- m_create_sgrb.assert_not_called()
- m_create_sgr.assert_not_called()
-
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_namespace_subnet_cidr')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_namespace')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'create_security_group_rule')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'create_security_group_rule_body')
- def test__apply_sg_rules_on_matched_pods_not_all(self, m_create_sgrb,
- m_create_sgr, m_get_ns,
- m_get_ns_sub_cidr):
- pod = mock.sentinel.pod
- ns = mock.sentinel.ns
- port = {'protocol': 'TCP', 'port': 22}
- matched_pods = {'container_port': [pod]}
-
- m_get_ns.return_value = ns
- m_create_sgrb.return_value = {'security_group_rule': {}}
- crd_rules = []
- direction = 'ingress'
-
- self.npsg._apply_sg_rules_on_matched_pods(matched_pods, self.sg_id,
- direction, 'ns', port,
- crd_rules)
-
- m_get_ns_sub_cidr.assert_called_once_with(ns)
- m_create_sgrb.assert_called_once_with(self.sg_id, direction,
- 'container_port',
- protocol=mock.ANY, cidr=mock.ANY,
- pods=[pod])
- m_create_sgr.assert_called_once()
- self.assertEqual(len(crd_rules), 1)
-
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_namespace_subnet_cidr')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'get_namespace')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'create_security_group_rule')
- def test__apply_sg_rules_on_matched_pods_all(self, m_create_sgr, m_get_ns,
- m_get_ns_sub_cidr):
- pod = mock.sentinel.pod
- ns = mock.sentinel.ns
- port = {'protocol': 'TCP', 'port': 22}
- matched_pods = {'container_port': [pod]}
-
- m_get_ns.return_value = ns
- crd_rules = []
- direction = 'ingress'
-
- self.npsg._apply_sg_rules_on_matched_pods(matched_pods, self.sg_id,
- direction, 'ns', port,
- crd_rules, allow_all=True)
-
- self.assertEqual(m_create_sgr.call_count, 2)
- self.assertEqual(len(crd_rules), 2)
- self.assertListEqual([r['security_group_rule']['ethertype']
- for r in crd_rules], ['IPv4', 'IPv6'])
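
For context on the _bump_networkpolicy mock used throughout the tests above: instead of
deleting security group rules and patching them back into the CRD (the removed
delete_security_group_rule/patch_kuryrnetworkpolicy_crd pairs), the drivers now nudge the
parent NetworkPolicy so the KuryrNetworkPolicy handler recomputes its rules. A rough
sketch assuming an annotation-based trigger; the helper name, annotation key and client
call here are assumptions, not the real implementation:

    import uuid

    def _bump_networkpolicy(k8s, np):
        # Touch an annotation so the watcher sees a new resourceVersion
        # and re-runs reconciliation for this policy.
        k8s.annotate(np['metadata']['selfLink'],
                     {'openstack.org/kuryr-bump': uuid.uuid4().hex})
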
diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_vif_pool.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_vif_pool.py
index 01cd5277e..f51f01558 100644
--- a/kuryr_kubernetes/tests/unit/controller/drivers/test_vif_pool.py
+++ b/kuryr_kubernetes/tests/unit/controller/drivers/test_vif_pool.py
@@ -20,7 +20,6 @@
import munch
from openstack import exceptions as os_exc
from oslo_config import cfg as oslo_cfg
-from oslo_serialization import jsonutils
from os_vif.objects import vif as osv_vif
@@ -29,7 +28,6 @@
from kuryr_kubernetes.controller.drivers import neutron_vif
from kuryr_kubernetes.controller.drivers import vif_pool
from kuryr_kubernetes import exceptions
-from kuryr_kubernetes.objects import vif
from kuryr_kubernetes import os_vif_util as ovu
from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes.tests import fake
@@ -276,7 +274,8 @@ def test_release_vif(self):
m_driver._return_ports_to_pool.assert_not_called()
- def test__get_in_use_ports(self):
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_vifs')
+ def test__get_in_use_ports(self, get_vifs):
cls = vif_pool.BaseVIFPool
m_driver = mock.MagicMock(spec=cls)
@@ -284,10 +283,7 @@ def test__get_in_use_ports(self):
pod = get_pod_obj()
port_id = str(uuid.uuid4())
pod_vif = osv_vif.VIFBase(id=port_id)
- pod_state = vif.PodState(default_vif=pod_vif)
-
- pod['metadata']['annotations'][constants.K8S_ANNOTATION_VIF] = (
- jsonutils.dumps(pod_state.obj_to_primitive()))
+ get_vifs.return_value = {'eth0': pod_vif}
items = [pod]
kubernetes.get.return_value = {'items': items}
@@ -295,20 +291,6 @@ def test__get_in_use_ports(self):
self.assertEqual(resp, [port_id])
- def test__get_in_use_ports_exception(self):
- cls = vif_pool.BaseVIFPool
- m_driver = mock.MagicMock(spec=cls)
-
- kubernetes = self.useFixture(k_fix.MockK8sClient()).client
- pod = get_pod_obj()
- del pod['metadata']['annotations'][constants.K8S_ANNOTATION_VIF]
- items = [pod]
- kubernetes.get.return_value = {'items': items}
-
- resp = cls._get_in_use_ports(m_driver)
-
- self.assertEqual(resp, [])
-
def test__get_in_use_ports_empty(self):
cls = vif_pool.BaseVIFPool
m_driver = mock.MagicMock(spec=cls)
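
The rewritten test__get_in_use_ports above tracks VIF data moving off the pod annotation
(hence the dropped jsonutils/objects.vif imports and PodState parsing) and onto KuryrPort
CRDs surfaced through utils.get_vifs. A rough sketch of the collection loop under that
assumption; everything except utils.get_vifs is illustrative:

    from kuryr_kubernetes import clients
    from kuryr_kubernetes import constants
    from kuryr_kubernetes.controller.drivers import utils as c_utils

    def get_in_use_ports():
        # Collect the Neutron port ID of every VIF kuryr knows about,
        # reading each pod's VIFs from its KuryrPort CRD.
        k8s = clients.get_kubernetes_client()
        in_use_ports = []
        pods = k8s.get(constants.K8S_API_BASE + '/pods')
        for pod in pods.get('items', []):
            vifs = c_utils.get_vifs(pod)  # {ifname: os_vif VIF}
            in_use_ports.extend(vif.id for vif in vifs.values())
        return in_use_ports
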
diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetworkpolicy.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetworkpolicy.py
new file mode 100644
index 000000000..72c2ec335
--- /dev/null
+++ b/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrnetworkpolicy.py
@@ -0,0 +1,112 @@
+# Copyright 2020 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import mock
+
+from kuryr_kubernetes.controller.drivers import base as drivers
+from kuryr_kubernetes.controller.handlers import kuryrnetworkpolicy
+from kuryr_kubernetes.tests import base as test_base
+
+
+class TestPolicyHandler(test_base.TestCase):
+
+ @mock.patch.object(drivers.LBaaSDriver, 'get_instance')
+ @mock.patch.object(drivers.NetworkPolicyDriver, 'get_instance')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.clients.get_network_client')
+ @mock.patch('kuryr_kubernetes.clients.get_loadbalancer_client')
+ def setUp(self, m_get_os_lb, m_get_os_net, m_get_k8s, m_get_np,
+ m_get_lbaas):
+ super(TestPolicyHandler, self).setUp()
+
+ self._project_id = mock.sentinel.project_id
+ self._policy_name = 'np-test'
+ self._policy_uid = mock.sentinel.policy_uid
+ self._policy_link = mock.sentinel.policy_link
+
+ self._policy = {
+ 'apiVersion': 'networking.k8s.io/v1',
+ 'kind': 'NetworkPolicy',
+ 'metadata': {
+ 'name': self._policy_name,
+ 'resourceVersion': '2259309',
+ 'generation': 1,
+ 'creationTimestamp': '2018-09-18T14:09:51Z',
+ 'namespace': 'default',
+ 'annotations': {},
+ 'selfLink': self._policy_link,
+ 'uid': self._policy_uid
+ },
+ 'spec': {
+ 'egress': [{'ports': [{'port': 5978, 'protocol': 'TCP'}]}],
+ 'ingress': [{'ports': [{'port': 6379, 'protocol': 'TCP'}]}],
+ 'policyTypes': ['Ingress', 'Egress']
+ }
+ }
+
+ self.k8s = mock.Mock()
+ m_get_k8s.return_value = self.k8s
+ self.m_get_k8s = m_get_k8s
+
+ self.os_net = mock.Mock()
+ m_get_os_net.return_value = self.os_net
+ self.m_get_os_net = m_get_os_net
+
+ self.np_driver = mock.Mock()
+ m_get_np.return_value = self.np_driver
+ self.m_get_np = m_get_np
+
+ self.lbaas_driver = mock.Mock()
+ m_get_lbaas.return_value = self.lbaas_driver
+ self.m_get_lbaas = m_get_lbaas
+
+ self.k8s.get.return_value = {}
+ self.handler = kuryrnetworkpolicy.KuryrNetworkPolicyHandler()
+
+ def _get_knp_obj(self):
+ knp_obj = {
+ 'apiVersion': 'openstack.org/v1',
+ 'kind': 'KuryrNetworkPolicy',
+ 'metadata': {
+ 'name': 'np-test-network-policy',
+ 'namespace': 'test-1',
+ },
+ 'spec': {
+ 'securityGroupId': 'c1ac16f5-e198-4628-9d84-253c6001be8e',
+ 'securityGroupName': 'sg-test-network-policy'
+ }}
+ return knp_obj
+
+ def test_init(self):
+ self.m_get_k8s.assert_called_once()
+ self.m_get_np.assert_called_once()
+
+ self.assertEqual(self.np_driver, self.handler._drv_policy)
+ self.assertEqual(self.k8s, self.handler.k8s)
+ self.assertEqual(self.os_net, self.handler.os_net)
+ self.assertEqual(self.lbaas_driver, self.handler._drv_lbaas)
+
+ def test_convert(self):
+ self.k8s.get.return_value = {'items': [{
+ 'metadata': {
+ 'selfLink': mock.sentinel.old_self_link,
+ 'namespace': 'ns',
+ }
+ }]}
+ self.np_driver.get_from_old_crd.return_value = mock.sentinel.new_crd
+
+ self.handler._convert_old_crds()
+
+ self.k8s.post.assert_called_once_with(mock.ANY, mock.sentinel.new_crd)
+ self.k8s.delete.assert_called_once_with(mock.sentinel.old_self_link)
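
test_convert above fixes the migration contract for the new handler: each legacy
KuryrNetPolicy is translated with get_from_old_crd, the result is POSTed as a
KuryrNetworkPolicy, and the old object is deleted by its selfLink. A condensed sketch of
that loop with assumed API paths (the handler's real _convert_old_crds is the
authoritative version):

    def convert_old_crds(k8s, np_driver):
        # One-shot migration run when the KuryrNetworkPolicy handler starts.
        old = k8s.get('/apis/openstack.org/v1/kuryrnetpolicies')
        for crd in old.get('items', []):
            new_crd = np_driver.get_from_old_crd(crd)
            ns = crd['metadata']['namespace']
            k8s.post('/apis/openstack.org/v1/namespaces/%s/'
                     'kuryrnetworkpolicies' % ns, new_crd)
            k8s.delete(crd['metadata']['selfLink'])
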
diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrport.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrport.py
new file mode 100644
index 000000000..ac5d56523
--- /dev/null
+++ b/kuryr_kubernetes/tests/unit/controller/handlers/test_kuryrport.py
@@ -0,0 +1,752 @@
+# Copyright (c) 2020 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+from unittest import mock
+
+from openstack import exceptions as os_exc
+from os_vif import objects as os_obj
+from oslo_config import cfg
+
+from kuryr_kubernetes import constants
+from kuryr_kubernetes.controller.drivers import multi_vif
+from kuryr_kubernetes.controller.handlers import kuryrport
+from kuryr_kubernetes import exceptions as k_exc
+from kuryr_kubernetes.tests import base as test_base
+
+
+CONF = cfg.CONF
+
+
+class TestKuryrPortHandler(test_base.TestCase):
+
+ def setUp(self):
+ super().setUp()
+ self._project_id = mock.sentinel.project_id
+ self._subnets = mock.sentinel.subnets
+ self._security_groups = mock.sentinel.security_groups
+ self._host = mock.sentinel.hostname
+ self._pod_version = mock.sentinel.pod_version
+ self._pod_link = mock.sentinel.pod_link
+ self._kp_version = mock.sentinel.kp_version
+ self._kp_link = mock.sentinel.kp_link
+ self._kp_namespace = mock.sentinel.namespace
+ self._kp_uid = mock.sentinel.kp_uid
+ self._kp_name = 'pod1'
+
+ self._pod = {'metadata': {'resourceVersion': self._pod_version,
+ 'selfLink': self._pod_link,
+ 'name': self._kp_name,
+ 'namespace': self._kp_namespace},
+ 'spec': {'nodeName': self._host}}
+
+ self._kp = {
+ 'metadata': {
+ 'resourceVersion': self._kp_version,
+ 'selfLink': self._kp_link,
+ 'name': self._kp_name,
+ 'namespace': self._kp_namespace,
+ 'labels': {
+ constants.KURYRPORT_LABEL: self._host
+ }
+ },
+ 'spec': {
+ 'podUid': 'deadbeef',
+ 'podNodeName': self._host,
+ 'vifs': {}
+ }
+ }
+ self._vif1 = os_obj.vif.VIFBase()
+ self._vif2 = os_obj.vif.VIFBase()
+ self._vif1.active = False
+ self._vif2.active = False
+ self._vif1.plugin = 'object'
+ self._vif2.plugin = 'object'
+ self._vif1_primitive = self._vif1.obj_to_primitive()
+ self._vif2_primitive = self._vif2.obj_to_primitive()
+ self._vifs_primitive = {'eth0': {'default': True,
+ 'vif': self._vif1_primitive},
+ 'eth1': {'default': False,
+ 'vif': self._vif2_primitive}}
+ self._vifs = {'eth0': {'default': True,
+ 'vif': self._vif1},
+ 'eth1': {'default': False,
+ 'vif': self._vif2}}
+ self._pod_uri = (f"{constants.K8S_API_NAMESPACES}"
+ f"/{self._kp['metadata']['namespace']}/pods/"
+ f"{self._kp['metadata']['name']}")
+ self._driver = multi_vif.NoopMultiVIFDriver()
+
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler.get_vifs')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_present_no_vifs_create(self, ged, get_k8s_client, get_vifs):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ get_vifs.return_value = True
+
+ kp.on_present(self._kp)
+
+ get_vifs.assert_called_once_with(self._kp)
+
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler.get_vifs')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_present_getting_vifs_failed(self, ged, get_k8s_client,
+ get_vifs):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ get_vifs.return_value = False
+
+ self.assertFalse(kp.on_present(self._kp))
+
+ get_vifs.assert_called_once_with(self._kp)
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_project.'
+ 'DefaultPodProjectDriver.get_project')
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler._update_kuryrport_crd')
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'activate_vif')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_present(self, ged, get_k8s_client, activate_vif,
+ update_crd, get_project):
+        ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ self._kp['spec']['vifs'] = self._vifs_primitive
+ get_project.return_value = self._project_id
+
+ with mock.patch.object(kp, 'k8s') as k8s:
+ k8s.get.return_value = self._pod
+
+ kp.on_present(self._kp)
+
+ k8s.get.assert_called_once_with(self._pod_uri)
+
+ activate_vif.assert_has_calls([mock.call(self._vif1),
+ mock.call(self._vif2)])
+ update_crd.assert_called_once_with(self._kp, self._vifs)
+
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_present_active(self, ged, get_k8s_client):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ self._vif1.active = True
+ self._vif2.active = True
+ self._kp['spec']['vifs'] = {
+ 'eth0': {'default': True,
+ 'vif': self._vif1.obj_to_primitive()},
+ 'eth1': {'default': False,
+ 'vif': self._vif2.obj_to_primitive()}}
+
+ kp.on_present(self._kp)
+
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler._update_kuryrport_crd')
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'activate_vif')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_present_port_not_found(self, ged, get_k8s_client, activate_vif,
+ update_crd):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ self._kp['spec']['vifs'] = self._vifs_primitive
+ activate_vif.side_effect = os_exc.ResourceNotFound()
+
+ kp.on_present(self._kp)
+
+ activate_vif.assert_has_calls([mock.call(self._vif1),
+ mock.call(self._vif2)])
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'activate_vif')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_present_pod_not_found(self, ged, get_k8s_client, activate_vif):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ self._kp['spec']['vifs'] = self._vifs_primitive
+
+ with mock.patch.object(kp, 'k8s') as k8s:
+ k8s.get.side_effect = k_exc.K8sResourceNotFound(self._pod)
+
+ self.assertRaises(k_exc.K8sResourceNotFound, kp.on_present,
+ self._kp)
+
+ k8s.get.assert_called_once_with(self._pod_uri)
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'release_vif')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
+ 'DefaultPodSecurityGroupsDriver.get_security_groups')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_project.'
+ 'DefaultPodProjectDriver.get_project')
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler._update_kuryrport_crd')
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'activate_vif')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_present_fail_update_crd(self, ged, get_k8s_client,
+ activate_vif, update_crd, get_project,
+ get_sg, release_vif):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ self._kp['spec']['vifs'] = self._vifs_primitive
+ update_crd.side_effect = k_exc.K8sResourceNotFound(self._kp)
+ get_project.return_value = self._project_id
+ get_sg.return_value = self._security_groups
+
+ with mock.patch.object(kp, 'k8s') as k8s:
+ k8s.get.return_value = self._pod
+
+ kp.on_present(self._kp)
+
+ k8s.get.assert_called_once_with(self._pod_uri)
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'release_vif')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
+ 'DefaultPodSecurityGroupsDriver.get_security_groups')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_project.'
+ 'DefaultPodProjectDriver.get_project')
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler._update_kuryrport_crd')
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'activate_vif')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_present_exception_during_update_crd(self, ged, get_k8s_client,
+ activate_vif,
+ update_crd, get_project,
+ get_sg, release_vif):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ self._kp['spec']['vifs'] = self._vifs_primitive
+ update_crd.side_effect = k_exc.K8sClientException()
+ get_project.return_value = self._project_id
+ get_sg.return_value = self._security_groups
+
+ with mock.patch.object(kp, 'k8s') as k8s:
+ k8s.get.return_value = self._pod
+
+ self.assertRaises(k_exc.ResourceNotReady, kp.on_present, self._kp)
+
+ k8s.get.assert_called_once_with(self._pod_uri)
+
+ update_crd.assert_called_once_with(self._kp, self._vifs)
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'activate_vif')
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
+ 'update_port_pci_info')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_present_sriov(self, ged, get_k8s_client, update_port_pci_info,
+ activate_vif):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ self._vif2.plugin = constants.KURYR_VIF_TYPE_SRIOV
+ self._vif2.active = True
+ self._kp['spec']['vifs'] = {
+ 'eth0': {'default': True,
+ 'vif': self._vif2.obj_to_primitive()},
+ 'eth1': {'default': False,
+ 'vif': self._vif1.obj_to_primitive()}}
+ CONF.set_override('enable_node_annotations', True, group='sriov')
+ self.addCleanup(CONF.clear_override, 'enable_node_annotations',
+ group='sriov')
+ activate_vif.side_effect = os_exc.ResourceNotFound()
+
+ kp.on_present(self._kp)
+
+ update_port_pci_info.assert_called_once_with(self._host, self._vif2)
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_project.'
+ 'DefaultPodProjectDriver.get_project')
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler._update_services')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
+ 'DefaultPodSecurityGroupsDriver.create_sg_rules')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.'
+ 'ServiceSecurityGroupsDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.LBaaSDriver.'
+ 'get_instance')
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler._update_kuryrport_crd')
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'activate_vif')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler._is_network_policy_enabled')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_present_np(self, ged, is_np_enabled, get_k8s_client,
+ activate_vif, update_crd, get_lb_instance,
+ get_sg_instance, create_sgr, update_services,
+ get_services, get_project):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ self._kp['spec']['vifs'] = self._vifs_primitive
+
+ with mock.patch.object(kp, 'k8s') as k8s:
+ k8s.get.return_value = self._pod
+
+ kp.on_present(self._kp)
+
+ k8s.get.assert_called_once_with(self._pod_uri)
+
+ activate_vif.assert_has_calls([mock.call(self._vif1),
+ mock.call(self._vif2)])
+ update_crd.assert_called_once_with(self._kp, self._vifs)
+ create_sgr.assert_called_once_with(self._pod)
+
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_finalize_exception_on_pod(self, ged, k8s):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ self._kp['spec']['vifs'] = self._vifs_primitive
+
+ with mock.patch.object(kp, 'k8s') as k8s:
+ k8s.get.side_effect = k_exc.K8sResourceNotFound(self._pod)
+
+ self.assertRaises(k_exc.K8sResourceNotFound, kp.on_finalize,
+ self._kp)
+
+ k8s.get.assert_called_once_with(self._pod_uri)
+ k8s.remove_finalizer.assert_called_once_with(
+ self._kp, constants.POD_FINALIZER)
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_finalize_host_net_or_no_nodename(self, ged, k8s,
+ is_host_network):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ self._kp['spec']['vifs'] = self._vifs_primitive
+ is_host_network.return_value = False
+        _pod = copy.deepcopy(self._pod)
+ del _pod['spec']['nodeName']
+ with mock.patch.object(kp, 'k8s') as k8s:
+ k8s.get.return_value = _pod
+
+ kp.on_finalize(self._kp)
+
+ k8s.get.assert_called_once_with(self._pod_uri)
+
+        is_host_network.assert_called_once_with(_pod)
+ is_host_network.reset_mock()
+
+        is_host_network.return_value = True
+ with mock.patch.object(kp, 'k8s') as k8s:
+ k8s.get.return_value = self._pod
+
+ kp.on_finalize(self._kp)
+
+ k8s.get.assert_called_once_with(self._pod_uri)
+
+ is_host_network.assert_called_once_with(self._pod)
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'release_vif')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
+ 'DefaultPodSecurityGroupsDriver.get_security_groups')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
+ 'DefaultPodSecurityGroupsDriver.delete_sg_rules')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_project.'
+ 'DefaultPodProjectDriver.get_project')
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_finalize_crd_sg_exceptions(self, ged, k8s, is_host_network,
+ get_project, delete_sg_rules,
+ get_sg, release_vif):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ self._kp['spec']['vifs'] = self._vifs_primitive
+ is_host_network.return_value = False
+ get_project.return_value = self._project_id
+ delete_sg_rules.side_effect = k_exc.ResourceNotReady(self._pod)
+ get_sg.side_effect = k_exc.ResourceNotReady(self._pod)
+
+ with mock.patch.object(kp, 'k8s') as k8s:
+ k8s.get.return_value = self._pod
+
+ kp.on_finalize(self._kp)
+
+ k8s.get.assert_called_once_with(self._pod_uri)
+ k8s.remove_finalizer.assert_has_calls(
+ [mock.call(self._pod, constants.POD_FINALIZER),
+ mock.call(self._kp, constants.KURYRPORT_FINALIZER)])
+ is_host_network.assert_called_once_with(self._pod)
+ delete_sg_rules.assert_called_once_with(self._pod)
+ get_sg.assert_called_once_with(self._pod, self._project_id)
+ release_vif.assert_has_calls([mock.call(self._pod, self._vif1,
+ self._project_id, []),
+ mock.call(self._pod, self._vif2,
+ self._project_id, [])])
+
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler._update_services')
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.'
+ 'ServiceSecurityGroupsDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.LBaaSDriver.'
+ 'get_instance')
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler._is_network_policy_enabled')
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'release_vif')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
+ 'DefaultPodSecurityGroupsDriver.get_security_groups')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
+ 'DefaultPodSecurityGroupsDriver.delete_sg_rules')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_project.'
+ 'DefaultPodProjectDriver.get_project')
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_on_finalize_np(self, ged, k8s, is_host_network, get_project,
+ delete_sg_rules, get_sg, release_vif,
+ is_np_enabled, get_lb_instance, get_sg_instance,
+ get_services, update_services):
+ ged.return_value = [self._driver]
+ CONF.set_override('enforce_sg_rules', True, group='octavia_defaults')
+ self.addCleanup(CONF.clear_override, 'enforce_sg_rules',
+ group='octavia_defaults')
+ kp = kuryrport.KuryrPortHandler()
+ self._kp['spec']['vifs'] = self._vifs_primitive
+ is_host_network.return_value = False
+ get_project.return_value = self._project_id
+ selector = mock.sentinel.selector
+ delete_sg_rules.return_value = selector
+ get_sg.return_value = self._security_groups
+ get_services.return_value = mock.sentinel.services
+
+ with mock.patch.object(kp, 'k8s') as k8s:
+ k8s.get.return_value = self._pod
+
+ kp.on_finalize(self._kp)
+
+ k8s.get.assert_called_once_with(self._pod_uri)
+ k8s.remove_finalizer.assert_has_calls(
+ [mock.call(self._pod, constants.POD_FINALIZER),
+ mock.call(self._kp, constants.KURYRPORT_FINALIZER)])
+
+ is_host_network.assert_called_once_with(self._pod)
+ delete_sg_rules.assert_called_once_with(self._pod)
+ get_sg.assert_called_once_with(self._pod, self._project_id)
+ release_vif.assert_has_calls([mock.call(self._pod, self._vif1,
+ self._project_id,
+ self._security_groups),
+ mock.call(self._pod, self._vif2,
+ self._project_id,
+ self._security_groups)])
+
+ get_services.assert_called_once()
+ update_services.assert_called_once_with(mock.sentinel.services,
+ selector, self._project_id)
+
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler._update_kuryrport_crd')
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'request_vif')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_subnet.'
+ 'DefaultPodSubnetDriver.get_subnets')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
+ 'DefaultPodSecurityGroupsDriver.get_security_groups')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_project.'
+ 'DefaultPodProjectDriver.get_project')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_get_vifs(self, ged, k8s, get_project, get_sg, get_subnets,
+ request_vif, update_crd):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ kp.k8s.get.return_value = self._pod
+ get_sg.return_value = self._security_groups
+ get_project.return_value = self._project_id
+ get_subnets.return_value = mock.sentinel.subnets
+ request_vif.return_value = self._vif1
+
+ self.assertTrue(kp.get_vifs(self._kp))
+
+ kp.k8s.get.assert_called_once_with(self._pod_uri)
+ get_project.assert_called_once_with(self._pod)
+ get_sg.assert_called_once_with(self._pod, self._project_id)
+ get_subnets.assert_called_once_with(self._pod, self._project_id)
+ request_vif.assert_called_once_with(self._pod, self._project_id,
+ mock.sentinel.subnets,
+ self._security_groups)
+ update_crd.assert_called_once_with(self._kp,
+ {constants.DEFAULT_IFNAME:
+ {'default': True,
+ 'vif': self._vif1}})
+
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_get_vifs_pod_not_found(self, ged, k8s):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ kp.k8s.get.side_effect = k_exc.K8sResourceNotFound(self._pod)
+
+ self.assertRaises(k_exc.K8sResourceNotFound, kp.get_vifs, self._kp)
+
+ kp.k8s.get.assert_called_once_with(self._pod_uri)
+ kp.k8s.remove_finalizer.assert_called_once_with(
+ self._kp, constants.KURYRPORT_FINALIZER)
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_subnet.'
+ 'DefaultPodSubnetDriver.get_subnets')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
+ 'DefaultPodSecurityGroupsDriver.get_security_groups')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_project.'
+ 'DefaultPodProjectDriver.get_project')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_get_vifs_subnet_error(self, ged, k8s, get_project, get_sg,
+ get_subnets):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ kp.k8s.get.return_value = self._pod
+ get_sg.return_value = self._security_groups
+ get_project.return_value = self._project_id
+ get_subnets.side_effect = os_exc.ResourceNotFound()
+
+ self.assertFalse(kp.get_vifs(self._kp))
+
+ kp.k8s.get.assert_called_once_with(self._pod_uri)
+ get_project.assert_called_once_with(self._pod)
+ get_sg.assert_called_once_with(self._pod, self._project_id)
+ get_subnets.assert_called_once_with(self._pod, self._project_id)
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'request_vif')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_subnet.'
+ 'DefaultPodSubnetDriver.get_subnets')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
+ 'DefaultPodSecurityGroupsDriver.get_security_groups')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_project.'
+ 'DefaultPodProjectDriver.get_project')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_get_vifs_no_vif(self, ged, k8s, get_project, get_sg, get_subnets,
+ request_vif):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ kp.k8s.get.return_value = self._pod
+ get_sg.return_value = self._security_groups
+ get_project.return_value = self._project_id
+ get_subnets.return_value = mock.sentinel.subnets
+ request_vif.return_value = None
+
+ self.assertFalse(kp.get_vifs(self._kp))
+
+ kp.k8s.get.assert_called_once_with(self._pod_uri)
+ get_project.assert_called_once_with(self._pod)
+ get_sg.assert_called_once_with(self._pod, self._project_id)
+ get_subnets.assert_called_once_with(self._pod, self._project_id)
+ request_vif.assert_called_once_with(self._pod, self._project_id,
+ mock.sentinel.subnets,
+ self._security_groups)
+
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler._update_kuryrport_crd')
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'request_vif')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_subnet.'
+ 'DefaultPodSubnetDriver.get_subnets')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
+ 'DefaultPodSecurityGroupsDriver.get_security_groups')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_project.'
+ 'DefaultPodProjectDriver.get_project')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_get_vifs_with_additional_vif(self, ged, k8s, get_project, get_sg,
+ get_subnets, request_vif,
+ update_crd):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ kp.k8s.get.return_value = self._pod
+ fake_driver = mock.MagicMock()
+ fake_driver.request_additional_vifs.return_value = [self._vif2]
+ kp._drv_multi_vif.append(fake_driver)
+ get_sg.return_value = self._security_groups
+ get_project.return_value = self._project_id
+ get_subnets.return_value = mock.sentinel.subnets
+ request_vif.return_value = self._vif1
+
+ self.assertTrue(kp.get_vifs(self._kp))
+
+ kp.k8s.get.assert_called_once_with(self._pod_uri)
+ get_project.assert_called_once_with(self._pod)
+ get_sg.assert_called_once_with(self._pod, self._project_id)
+ get_subnets.assert_called_once_with(self._pod, self._project_id)
+ request_vif.assert_called_once_with(self._pod, self._project_id,
+ mock.sentinel.subnets,
+ self._security_groups)
+ update_crd.assert_called_once_with(self._kp,
+ {'eth0': {'default': True,
+ 'vif': self._vif1},
+ 'eth1': {'default': False,
+ 'vif': self._vif2}})
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'release_vif')
+ @mock.patch('kuryr_kubernetes.controller.handlers.kuryrport.'
+ 'KuryrPortHandler._update_kuryrport_crd')
+ @mock.patch('kuryr_kubernetes.controller.drivers.vif_pool.MultiVIFPool.'
+ 'request_vif')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_subnet.'
+ 'DefaultPodSubnetDriver.get_subnets')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_security_groups.'
+ 'DefaultPodSecurityGroupsDriver.get_security_groups')
+ @mock.patch('kuryr_kubernetes.controller.drivers.default_project.'
+ 'DefaultPodProjectDriver.get_project')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_get_exception_on_update_crd(self, ged, k8s, get_project, get_sg,
+ get_subnets, request_vif, update_crd,
+ release_vif):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ kp.k8s.get.return_value = self._pod
+ get_sg.return_value = self._security_groups
+ get_project.return_value = self._project_id
+ get_subnets.return_value = mock.sentinel.subnets
+ request_vif.return_value = self._vif1
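+        # Simulate the CRD patch failing so the handler has to release the
+        # freshly requested VIF (asserted below).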
+ update_crd.side_effect = k_exc.K8sClientException()
+
+ self.assertTrue(kp.get_vifs(self._kp))
+
+ kp.k8s.get.assert_called_once_with(self._pod_uri)
+ get_project.assert_called_once_with(self._pod)
+ get_sg.assert_called_once_with(self._pod, self._project_id)
+ get_subnets.assert_called_once_with(self._pod, self._project_id)
+ request_vif.assert_called_once_with(self._pod, self._project_id,
+ mock.sentinel.subnets,
+ self._security_groups)
+ update_crd.assert_called_once_with(self._kp,
+ {constants.DEFAULT_IFNAME:
+ {'default': True,
+ 'vif': self._vif1}})
+ release_vif.assert_called_once_with(self._pod, self._vif1,
+ self._project_id,
+ self._security_groups)
+
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_update_kuryrport_crd(self, ged, k8s):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+
+ kp._update_kuryrport_crd(self._kp, self._vifs)
+ self._vif1.obj_reset_changes()
+ self._vif2.obj_reset_changes()
+ vif1 = self._vif1.obj_to_primitive()
+ vif2 = self._vif2.obj_to_primitive()
+
+ kp.k8s.patch_crd.assert_called_once_with(
+ 'spec', self._kp_link, {'vifs': {'eth0': {'default': True,
+ 'vif': vif1},
+ 'eth1': {'default': False,
+ 'vif': vif2}}})
+
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_is_network_policy_enabled(self, ged, k8s):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+
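+        # Three combinations are checked: policy handler disabled, enabled
+        # without the policy SG driver, and enabled together with it - only
+        # the last one enables network policy support.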
+ CONF.set_override('enabled_handlers', ['fake_handler'],
+ group='kubernetes')
+ CONF.set_override('service_security_groups_driver', 'foo',
+ group='kubernetes')
+
+ self.assertFalse(kp._is_network_policy_enabled())
+
+ CONF.set_override('enabled_handlers', ['policy'],
+ group='kubernetes')
+ CONF.set_override('service_security_groups_driver', 'foo',
+ group='kubernetes')
+
+ self.assertFalse(kp._is_network_policy_enabled())
+
+ CONF.set_override('enabled_handlers', ['policy'],
+ group='kubernetes')
+ self.addCleanup(CONF.clear_override, 'enabled_handlers',
+ group='kubernetes')
+ CONF.set_override('service_security_groups_driver', 'policy',
+ group='kubernetes')
+ self.addCleanup(CONF.clear_override, 'service_security_groups_driver',
+ group='kubernetes')
+
+ self.assertTrue(kp._is_network_policy_enabled())
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
+ 'service_matches_affected_pods')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.MultiVIFDriver.'
+ 'get_enabled_drivers')
+ def test_update_services(self, ged, k8s, smap):
+ ged.return_value = [self._driver]
+ kp = kuryrport.KuryrPortHandler()
+ kp._drv_lbaas = mock.MagicMock()
+ kp._drv_svc_sg = mock.MagicMock()
+ kp._drv_svc_sg.get_security_groups.return_value = self._security_groups
+
+ smap.side_effect = [True, False]
+ services = {'items': ['service1', 'service2']}
+
+ kp._update_services(services, mock.sentinel.crd_pod_selectors,
+ self._project_id)
+
+ smap.assert_has_calls([mock.call('service1',
+ mock.sentinel.crd_pod_selectors),
+ mock.call('service2',
+ mock.sentinel.crd_pod_selectors)])
+ kp._drv_svc_sg.get_security_groups.assert_called_once_with(
+ 'service1', self._project_id)
+ kp._drv_lbaas.update_lbaas_sg.assert_called_once_with(
+ 'service1', self._security_groups)
diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py
index e358b0e29..f04d49ca1 100644
--- a/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py
+++ b/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py
@@ -13,24 +13,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-import itertools
from unittest import mock
-import uuid
import os_vif.objects.network as osv_network
import os_vif.objects.subnet as osv_subnet
-from kuryr_kubernetes import constants as k_const
-from kuryr_kubernetes.controller.drivers import base as drv_base
from kuryr_kubernetes.controller.handlers import lbaas as h_lbaas
-from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.objects import lbaas as obj_lbaas
from kuryr_kubernetes.tests import base as test_base
_SUPPORTED_LISTENER_PROT = ('HTTP', 'HTTPS', 'TCP')
-class TestLBaaSSpecHandler(test_base.TestCase):
+class TestServiceHandler(test_base.TestCase):
@mock.patch('kuryr_kubernetes.controller.drivers.base'
'.ServiceSecurityGroupsDriver.get_instance')
@@ -42,105 +37,179 @@ def test_init(self, m_get_drv_project, m_get_drv_subnets, m_get_drv_sg):
m_get_drv_project.return_value = mock.sentinel.drv_project
m_get_drv_subnets.return_value = mock.sentinel.drv_subnets
m_get_drv_sg.return_value = mock.sentinel.drv_sg
- handler = h_lbaas.LBaaSSpecHandler()
+ handler = h_lbaas.ServiceHandler()
self.assertEqual(mock.sentinel.drv_project, handler._drv_project)
self.assertEqual(mock.sentinel.drv_subnets, handler._drv_subnets)
self.assertEqual(mock.sentinel.drv_sg, handler._drv_sg)
- @mock.patch('kuryr_kubernetes.utils.set_lbaas_spec')
- @mock.patch('kuryr_kubernetes.utils.get_lbaas_spec')
- def test_on_present(self, m_get_lbaas_spec, m_set_lbaas_spec):
- svc_event = mock.sentinel.svc_event
- old_spec = mock.sentinel.old_spec
- new_spec = mock.sentinel.new_spec
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ def test_on_present(self, get_k8s_client):
+ svc_event = {
+ "metadata": {
+ "creationTimestamp": "2020-07-25T18:15:12Z",
+ "finalizers": [
+ "openstack.org/service"
+ ],
+ "labels": {
+ "run": "test"
+ },
+ "name": "test",
+ "namespace": "test",
+ "resourceVersion": "413753",
+ "selfLink": "",
+ "uid": "a026ae48-6141-4029-b743-bac48dae7f06"
+ },
+ "spec": {
+ "clusterIP": "2.2.2.2",
+ "ports": [
+ {
+ "port": 1,
+ "protocol": "TCP",
+ "targetPort": 1
+ }
+ ],
+ "selector": {
+ "run": "test"
+ },
+ "sessionAffinity": "None",
+ "type": "ClusterIP"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ }
+
+ old_spec = {
+ 'apiVersion': 'openstack.org/v1',
+ 'kind': 'KuryrLoadBalancer',
+ 'metadata': {
+ 'name': 'test',
+ 'finalizers': [''],
+ },
+ 'spec': {
+ 'ip': '1.1.1.1'
+ }
+ }
+ new_spec = {
+ 'apiVersion': 'openstack.org/v1',
+ 'kind': 'KuryrLoadBalancer',
+ 'metadata': {
+ 'name': 'test',
+ 'finalizers': [''],
+ },
+ 'spec': {
+ 'ip': '2.2.2.2'
+ }
+ }
project_id = mock.sentinel.project_id
m_drv_project = mock.Mock()
m_drv_project.get_project.return_value = project_id
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
- m_get_lbaas_spec.return_value = old_spec
+ m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
m_handler._has_lbaas_spec_changes.return_value = True
- m_handler._generate_lbaas_spec.return_value = new_spec
+ m_handler.create_crd_spec.return_value = new_spec
m_handler._should_ignore.return_value = False
m_handler._drv_project = m_drv_project
- h_lbaas.LBaaSSpecHandler.on_present(m_handler, svc_event)
+ h_lbaas.ServiceHandler.on_present(m_handler, svc_event)
+
+        m_handler._should_ignore.assert_called_once_with(svc_event)
+        m_handler._has_lbaas_spec_changes.assert_called_once()
+        m_handler._update_crd_spec.assert_called_once()
+
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ def test_on_present_no_changes(self, get_k8s_client):
+ svc_event = {
+ "metadata": {
+ "creationTimestamp": "2020-07-25T18:15:12Z",
+ "finalizers": [
+ "openstack.org/service"
+ ],
+ "labels": {
+ "run": "test"
+ },
+ "name": "test",
+ "namespace": "test",
+ "resourceVersion": "413753",
+ "selfLink": "",
+ "uid": "a026ae48-6141-4029-b743-bac48dae7f06"
+ },
+ "spec": {
+ "clusterIP": "2.2.2.2",
+ "ports": [
+ {
+ "port": 1,
+ "protocol": "TCP",
+ "targetPort": 1
+ }
+ ],
+ "selector": {
+ "run": "test"
+ },
+ "sessionAffinity": "None",
+ "type": "ClusterIP"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ }
- m_get_lbaas_spec.assert_called_once_with(svc_event)
- m_handler._has_lbaas_spec_changes.assert_called_once_with(svc_event,
- old_spec)
- m_handler._generate_lbaas_spec.assert_called_once_with(svc_event)
- m_set_lbaas_spec.assert_called_once_with(svc_event, new_spec)
+ old_spec = {
+ 'apiVersion': 'openstack.org/v1',
+ 'kind': 'KuryrLoadBalancer',
+ 'metadata': {
+ 'name': 'test',
+ 'finalizers': [''],
+ },
+ 'spec': {
+ 'ip': '1.1.1.1'
+ }
+ }
- @mock.patch('kuryr_kubernetes.utils.set_lbaas_spec')
- @mock.patch('kuryr_kubernetes.utils.get_lbaas_spec')
- def test_on_present_no_changes(self, m_get_lbaas_spec,
- m_set_lbaas_spec):
- svc_event = mock.sentinel.svc_event
- old_spec = mock.sentinel.old_spec
+ project_id = mock.sentinel.project_id
+ m_drv_project = mock.Mock()
+ m_drv_project.get_project.return_value = project_id
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
- m_get_lbaas_spec.return_value = old_spec
- m_handler._has_lbaas_spec_changes.return_value = False
+ m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
+        m_handler._has_lbaas_spec_changes.return_value = False
+ m_handler.create_crd_spec.return_value = old_spec
m_handler._should_ignore.return_value = False
+ m_handler._drv_project = m_drv_project
- h_lbaas.LBaaSSpecHandler.on_present(m_handler, svc_event)
-
- m_get_lbaas_spec.assert_called_once_with(svc_event)
- m_handler._has_lbaas_spec_changes.assert_called_once_with(svc_event,
- old_spec)
- m_handler._generate_lbaas_spec.assert_not_called()
- m_set_lbaas_spec.assert_not_called()
-
- @mock.patch('kuryr_kubernetes.utils.set_lbaas_spec')
- @mock.patch('kuryr_kubernetes.utils.get_lbaas_spec')
- def test_on_present_no_selector(self, m_get_lbaas_spec,
- m_set_lbaas_spec):
- svc_event = {'metadata': {'name': 'dummy_name'}}
- old_spec = mock.sentinel.old_spec
-
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
- m_get_lbaas_spec.return_value = old_spec
- m_handler._should_ignore.return_value = True
-
- h_lbaas.LBaaSSpecHandler.on_present(m_handler, svc_event)
-
- m_get_lbaas_spec.assert_called_once_with(svc_event)
- m_handler._has_lbaas_spec_changes.assert_not_called()
- m_handler._generate_lbaas_spec.assert_not_called()
- m_set_lbaas_spec.assert_not_called()
+ h_lbaas.ServiceHandler.on_present(m_handler, svc_event)
+
+        m_handler._should_ignore.assert_called_once_with(svc_event)
+        m_handler._has_lbaas_spec_changes.assert_called_once()
+        m_handler._update_crd_spec.assert_not_called()
def test_get_service_ip(self):
svc_body = {'spec': {'type': 'ClusterIP',
'clusterIP': mock.sentinel.cluster_ip}}
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
+ m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
- ret = h_lbaas.LBaaSSpecHandler._get_service_ip(m_handler, svc_body)
+ ret = h_lbaas.ServiceHandler._get_service_ip(m_handler, svc_body)
self.assertEqual(mock.sentinel.cluster_ip, ret)
svc_body = {'spec': {'type': 'LoadBalancer',
'clusterIP': mock.sentinel.cluster_ip}}
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
+ m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
- ret = h_lbaas.LBaaSSpecHandler._get_service_ip(m_handler, svc_body)
+ ret = h_lbaas.ServiceHandler._get_service_ip(m_handler, svc_body)
self.assertEqual(mock.sentinel.cluster_ip, ret)
def test_is_supported_type_clusterip(self):
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
+ m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
svc_body = {'spec': {'type': 'ClusterIP',
'clusterIP': mock.sentinel.cluster_ip}}
- ret = h_lbaas.LBaaSSpecHandler._is_supported_type(m_handler, svc_body)
+ ret = h_lbaas.ServiceHandler._is_supported_type(m_handler, svc_body)
self.assertEqual(ret, True)
def test_is_supported_type_loadbalancer(self):
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
+ m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
svc_body = {'spec': {'type': 'LoadBalancer',
'clusterIP': mock.sentinel.cluster_ip}}
- ret = h_lbaas.LBaaSSpecHandler._is_supported_type(m_handler, svc_body)
+ ret = h_lbaas.ServiceHandler._is_supported_type(m_handler, svc_body)
self.assertEqual(ret, True)
def _make_test_net_obj(self, cidr_list):
@@ -148,54 +217,9 @@ def _make_test_net_obj(self, cidr_list):
subnets_list = osv_subnet.SubnetList(objects=subnets)
return osv_network.Network(subnets=subnets_list)
- def test_generate_lbaas_spec(self):
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
-
- service = mock.sentinel.service
- project_id = mock.sentinel.project_id
- ip = mock.sentinel.ip
- subnet_id = mock.sentinel.subnet_id
- ports = mock.sentinel.ports
- sg_ids = mock.sentinel.sg_ids
-
- m_drv_project = mock.Mock()
- m_drv_project.get_project.return_value = project_id
- m_drv_sg = mock.Mock()
- m_drv_sg.get_security_groups.return_value = sg_ids
- m_handler._drv_project = m_drv_project
- m_handler._drv_sg = m_drv_sg
- m_handler._get_service_ip.return_value = ip
- m_handler._get_subnet_id.return_value = subnet_id
- m_handler._generate_lbaas_port_specs.return_value = ports
-
- spec_ctor_path = 'kuryr_kubernetes.objects.lbaas.LBaaSServiceSpec'
- with mock.patch(spec_ctor_path) as m_spec_ctor:
- m_spec_ctor.return_value = mock.sentinel.ret_obj
- service = {'spec': {'type': 'ClusterIP'}}
-
- ret_obj = h_lbaas.LBaaSSpecHandler._generate_lbaas_spec(
- m_handler, service)
- self.assertEqual(mock.sentinel.ret_obj, ret_obj)
- m_spec_ctor.assert_called_once_with(
- ip=ip,
- project_id=project_id,
- subnet_id=subnet_id,
- ports=ports,
- security_groups_ids=sg_ids,
- type='ClusterIP',
- lb_ip=None)
-
- m_drv_project.get_project.assert_called_once_with(service)
- m_handler._get_service_ip.assert_called_once_with(service)
- m_handler._get_subnet_id.assert_called_once_with(
- service, project_id, ip)
- m_handler._generate_lbaas_port_specs.assert_called_once_with(service)
- m_drv_sg.get_security_groups.assert_called_once_with(
- service, project_id)
-
@mock.patch('kuryr_kubernetes.utils.has_port_changes')
def test_has_lbaas_spec_changes(self, m_port_changes):
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
+ m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
service = mock.sentinel.service
lbaas_spec = mock.sentinel.lbaas_spec
@@ -203,55 +227,126 @@ def test_has_lbaas_spec_changes(self, m_port_changes):
for has_port_changes in (True, False):
m_handler._has_ip_changes.return_value = has_ip_changes
m_port_changes.return_value = has_port_changes
- ret = h_lbaas.LBaaSSpecHandler._has_lbaas_spec_changes(
+ ret = h_lbaas.ServiceHandler._has_lbaas_spec_changes(
m_handler, service, lbaas_spec)
self.assertEqual(has_ip_changes or has_port_changes, ret)
def test_has_ip_changes(self):
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
+ m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
m_service = mock.MagicMock()
m_handler._get_service_ip.return_value = '1.1.1.1'
m_lbaas_spec = mock.MagicMock()
m_lbaas_spec.ip.__str__.return_value = '2.2.2.2'
- ret = h_lbaas.LBaaSSpecHandler._has_ip_changes(
+ ret = h_lbaas.ServiceHandler._has_ip_changes(
m_handler, m_service, m_lbaas_spec)
self.assertTrue(ret)
def test_has_ip_changes__no_changes(self):
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
- m_service = mock.MagicMock()
+ service = {
+ "metadata": {
+ "creationTimestamp": "2020-07-25T18:15:12Z",
+ "finalizers": [
+ "openstack.org/service"
+ ],
+ "labels": {
+ "run": "test"
+ },
+ "name": "test",
+ "namespace": "test",
+ "resourceVersion": "413753",
+ "selfLink": "",
+ "uid": "a026ae48-6141-4029-b743-bac48dae7f06"
+ },
+ "spec": {
+ "clusterIP": "1.1.1.1"
+ }
+ }
+ m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
m_handler._get_service_ip.return_value = '1.1.1.1'
- m_lbaas_spec = mock.MagicMock()
- m_lbaas_spec.ip.__str__.return_value = '1.1.1.1'
+ lb_crd = {
+ 'apiVersion': 'openstack.org/v1',
+ 'kind': 'KuryrLoadBalancer',
+ 'metadata': {
+ 'name': 'test',
+ 'finalizers': [''],
+ },
+ 'spec': {
+ 'ip': '1.1.1.1'
+ }
+ }
- ret = h_lbaas.LBaaSSpecHandler._has_ip_changes(
- m_handler, m_service, m_lbaas_spec)
+ ret = h_lbaas.ServiceHandler._has_ip_changes(
+ m_handler, service, lb_crd)
self.assertFalse(ret)
def test_has_ip_changes__no_spec(self):
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
- m_service = mock.MagicMock()
+ m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
m_handler._get_service_ip.return_value = '1.1.1.1'
- m_lbaas_spec = None
+ service = {
+ "metadata": {
+ "creationTimestamp": "2020-07-25T18:15:12Z",
+ "finalizers": [
+ "openstack.org/service"
+ ],
+ "labels": {
+ "run": "test"
+ },
+ "name": "test",
+ "namespace": "test",
+ "resourceVersion": "413753",
+ "selfLink": "",
+ "uid": "a026ae48-6141-4029-b743-bac48dae7f06"
+ },
+ "spec": {
+ "clusterIP": "1.1.1.1"
+ }
+ }
+ lb_crd = {
+ "spec": {
+ "ip": None
+ }
+ }
- ret = h_lbaas.LBaaSSpecHandler._has_ip_changes(
- m_handler, m_service, m_lbaas_spec)
+ ret = h_lbaas.ServiceHandler._has_ip_changes(
+ m_handler, service, lb_crd)
self.assertTrue(ret)
def test_has_ip_changes__no_nothing(self):
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
- m_service = mock.MagicMock()
+ m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
+ service = {
+ "metadata": {
+ "creationTimestamp": "2020-07-25T18:15:12Z",
+ "finalizers": [
+ "openstack.org/service"
+ ],
+ "labels": {
+ "run": "test"
+ },
+ "name": "test",
+ "namespace": "test",
+ "resourceVersion": "413753",
+ "selfLink": "",
+ "uid": "a026ae48-6141-4029-b743-bac48dae7f06"
+ },
+ "spec": {
+ "clusterIP": "1.1.1.1"
+ }
+ }
+ lb_crd = {
+ "spec": {
+ "ip": None
+ }
+ }
m_handler._get_service_ip.return_value = None
- m_lbaas_spec = None
- ret = h_lbaas.LBaaSSpecHandler._has_ip_changes(
- m_handler, m_service, m_lbaas_spec)
+ ret = h_lbaas.ServiceHandler._has_ip_changes(
+ m_handler, service, lb_crd)
self.assertFalse(ret)
@mock.patch('kuryr_kubernetes.utils.get_service_ports')
def test_generate_lbaas_port_specs(self, m_get_service_ports):
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
+ m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
m_get_service_ports.return_value = [
{'port': 1, 'name': 'X', 'protocol': 'TCP'},
{'port': 2, 'name': 'Y', 'protocol': 'TCP'}
@@ -261,7 +356,7 @@ def test_generate_lbaas_port_specs(self, m_get_service_ports):
obj_lbaas.LBaaSPortSpec(name='Y', protocol='TCP', port=2),
]
- ret = h_lbaas.LBaaSSpecHandler._generate_lbaas_port_specs(
+ ret = h_lbaas.ServiceHandler._generate_lbaas_port_specs(
m_handler, mock.sentinel.service)
self.assertEqual(expected_ports, ret)
m_get_service_ports.assert_called_once_with(
@@ -269,7 +364,7 @@ def test_generate_lbaas_port_specs(self, m_get_service_ports):
@mock.patch('kuryr_kubernetes.utils.get_service_ports')
def test_generate_lbaas_port_specs_udp(self, m_get_service_ports):
- m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)
+ m_handler = mock.Mock(spec=h_lbaas.ServiceHandler)
m_get_service_ports.return_value = [
{'port': 1, 'name': 'X', 'protocol': 'TCP'},
{'port': 2, 'name': 'Y', 'protocol': 'UDP'}
@@ -279,7 +374,7 @@ def test_generate_lbaas_port_specs_udp(self, m_get_service_ports):
obj_lbaas.LBaaSPortSpec(name='Y', protocol='UDP', port=2),
]
- ret = h_lbaas.LBaaSSpecHandler._generate_lbaas_port_specs(
+ ret = h_lbaas.ServiceHandler._generate_lbaas_port_specs(
m_handler, mock.sentinel.service)
self.assertEqual(expected_ports, ret)
m_get_service_ports.assert_called_once_with(
@@ -292,633 +387,3 @@ def test_set_lbaas_spec(self):
def test_get_lbaas_spec(self):
self.skipTest("skipping until generalised annotation handling is "
"implemented")
-
-
-class FakeLBaaSDriver(drv_base.LBaaSDriver):
-
- def ensure_loadbalancer(self, name, project_id, subnet_id, ip,
- security_groups_ids, service_type, provider=None):
- return obj_lbaas.LBaaSLoadBalancer(name=name,
- project_id=project_id,
- subnet_id=subnet_id,
- ip=ip,
- id=str(uuid.uuid4()),
- provider=provider)
-
- def ensure_listener(self, loadbalancer, protocol, port,
- service_type='ClusterIP'):
- if protocol not in _SUPPORTED_LISTENER_PROT:
- return None
-
- name = "%s:%s:%s" % (loadbalancer.name, protocol, port)
- return obj_lbaas.LBaaSListener(name=name,
- project_id=loadbalancer.project_id,
- loadbalancer_id=loadbalancer.id,
- protocol=protocol,
- port=port,
- id=str(uuid.uuid4()))
-
- def ensure_pool(self, loadbalancer, listener):
- return obj_lbaas.LBaaSPool(name=listener.name,
- project_id=loadbalancer.project_id,
- loadbalancer_id=loadbalancer.id,
- listener_id=listener.id,
- protocol=listener.protocol,
- id=str(uuid.uuid4()))
-
- def ensure_member(self, loadbalancer, pool, subnet_id, ip, port,
- target_ref_namespace, target_ref_name, listener_port=None
- ):
- name = "%s:%s:%s" % (loadbalancer.name, ip, port)
- return obj_lbaas.LBaaSMember(name=name,
- project_id=pool.project_id,
- pool_id=pool.id,
- subnet_id=subnet_id,
- ip=ip,
- port=port,
- id=str(uuid.uuid4()))
-
- def release_loadbalancer(self, loadbalancer):
- pass
-
- def release_listener(self, loadbalancer, listener):
- pass
-
- def release_pool(self, loadbalancer, pool):
- pass
-
- def release_member(self, loadbalancer, member):
- pass
-
- def get_service_loadbalancer_name(self, namespace, svc_name):
- return "%s/%s" % (namespace, svc_name)
-
- def get_loadbalancer_pool_name(self, lb_name, namespace, svc_name):
- return "%s/%s/%s" % (lb_name, namespace, svc_name)
-
-
-class TestLoadBalancerHandler(test_base.TestCase):
-
- @mock.patch('kuryr_kubernetes.controller.drivers.base.'
- 'ServiceProjectDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base.'
- 'ServiceSecurityGroupsDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.handlers.lbaas'
- '.LoadBalancerHandler._cleanup_leftover_lbaas')
- @mock.patch('kuryr_kubernetes.config.CONF')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.ServicePubIpDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.PodSubnetsDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.PodProjectDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.LBaaSDriver.get_instance')
- def test_init(self, m_get_drv_lbaas, m_get_drv_project,
- m_get_drv_subnets, m_get_drv_service_pub_ip, m_cfg,
- m_cleanup_leftover_lbaas,
- m_get_svc_sg_drv, m_get_svc_drv_project):
- m_get_drv_lbaas.return_value = mock.sentinel.drv_lbaas
- m_get_drv_project.return_value = mock.sentinel.drv_project
- m_get_drv_subnets.return_value = mock.sentinel.drv_subnets
- m_get_drv_service_pub_ip.return_value = mock.sentinel.drv_lb_ip
- m_get_svc_drv_project.return_value = mock.sentinel.drv_svc_project
- m_get_svc_sg_drv.return_value = mock.sentinel.drv_sg
- m_cfg.kubernetes.endpoints_driver_octavia_provider = 'default'
- handler = h_lbaas.LoadBalancerHandler()
-
- self.assertEqual(mock.sentinel.drv_lbaas, handler._drv_lbaas)
- self.assertEqual(mock.sentinel.drv_project, handler._drv_pod_project)
- self.assertEqual(mock.sentinel.drv_subnets, handler._drv_pod_subnets)
- self.assertEqual(mock.sentinel.drv_lb_ip, handler._drv_service_pub_ip)
- self.assertEqual(mock.sentinel.drv_svc_project, handler._drv_project)
- self.assertEqual(mock.sentinel.drv_sg, handler._drv_sg)
- self.assertIsNone(handler._lb_provider)
-
- @mock.patch('kuryr_kubernetes.controller.drivers.base.'
- 'ServiceProjectDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base.'
- 'ServiceSecurityGroupsDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.handlers.lbaas'
- '.LoadBalancerHandler._cleanup_leftover_lbaas')
- @mock.patch('kuryr_kubernetes.config.CONF')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.ServicePubIpDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.PodSubnetsDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.PodProjectDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.LBaaSDriver.get_instance')
- def test_init_provider_ovn(self, m_get_drv_lbaas, m_get_drv_project,
- m_get_drv_subnets, m_get_drv_service_pub_ip,
- m_cfg, m_cleanup_leftover_lbaas,
- m_get_svc_sg_drv, m_get_svc_drv_project):
- m_get_drv_lbaas.return_value = mock.sentinel.drv_lbaas
- m_get_drv_project.return_value = mock.sentinel.drv_project
- m_get_drv_subnets.return_value = mock.sentinel.drv_subnets
- m_get_drv_service_pub_ip.return_value = mock.sentinel.drv_lb_ip
- m_get_svc_drv_project.return_value = mock.sentinel.drv_svc_project
- m_get_svc_sg_drv.return_value = mock.sentinel.drv_sg
- m_cfg.kubernetes.endpoints_driver_octavia_provider = 'ovn'
- handler = h_lbaas.LoadBalancerHandler()
-
- self.assertEqual(mock.sentinel.drv_lbaas, handler._drv_lbaas)
- self.assertEqual(mock.sentinel.drv_project, handler._drv_pod_project)
- self.assertEqual(mock.sentinel.drv_subnets, handler._drv_pod_subnets)
- self.assertEqual(mock.sentinel.drv_lb_ip, handler._drv_service_pub_ip)
- self.assertEqual(mock.sentinel.drv_svc_project, handler._drv_project)
- self.assertEqual(mock.sentinel.drv_sg, handler._drv_sg)
- self.assertEqual('ovn', handler._lb_provider)
-
- @mock.patch('kuryr_kubernetes.utils.get_lbaas_spec')
- @mock.patch('kuryr_kubernetes.utils.set_lbaas_state')
- @mock.patch('kuryr_kubernetes.utils.get_lbaas_state')
- def test_on_present(self, m_get_lbaas_state, m_set_lbaas_state,
- m_get_lbaas_spec):
- lbaas_spec = mock.sentinel.lbaas_spec
- lbaas_spec.type = 'DummyType'
- lbaas_spec.lb_ip = "1.2.3.4"
- lbaas_spec.project_id = 12345678
-
- lbaas_state = mock.sentinel.lbaas_state
- lbaas_state.service_pub_ip_info = None
- loadbalancer = mock.Mock()
- loadbalancer.port_id = 12345678
- lbaas_state.loadbalancer = loadbalancer
- endpoints = mock.sentinel.endpoints
-
- m_drv_service_pub_ip = mock.Mock()
- m_drv_service_pub_ip.acquire_service_pub_ip_info.return_value = None
- m_drv_service_pub_ip.associate_pub_ip.return_value = True
-
- m_handler = mock.Mock(spec=h_lbaas.LoadBalancerHandler)
- m_get_lbaas_spec.return_value = lbaas_spec
- m_handler._should_ignore.return_value = False
- m_get_lbaas_state.return_value = lbaas_state
- m_handler._sync_lbaas_members.return_value = True
- m_handler._drv_service_pub_ip = m_drv_service_pub_ip
- m_handler._lb_provider = None
-
- h_lbaas.LoadBalancerHandler.on_present(m_handler, endpoints)
-
- m_get_lbaas_spec.assert_called_once_with(endpoints)
- m_handler._should_ignore.assert_called_once_with(endpoints, lbaas_spec)
- m_get_lbaas_state.assert_called_once_with(endpoints)
- m_handler._sync_lbaas_members.assert_called_once_with(
- endpoints, lbaas_state, lbaas_spec)
- m_set_lbaas_state.assert_called_once_with(
- endpoints, lbaas_state)
- m_handler._update_lb_status.assert_not_called()
-
- def _fake_sync_lbaas_members(self, endpoints, lbaas_state, lbaas_spec):
- loadbalancer = mock.Mock()
- loadbalancer.port_id = 12345678
- lbaas_state.loadbalancer = loadbalancer
- lbaas_state.service_pub_ip_info = None
- return True
-
- @mock.patch('kuryr_kubernetes.utils.get_lbaas_spec')
- @mock.patch('kuryr_kubernetes.utils.set_lbaas_state')
- @mock.patch('kuryr_kubernetes.utils.get_lbaas_state')
- def test_on_present_loadbalancer_service(
- self, m_get_lbaas_state, m_set_lbaas_state, m_get_lbaas_spec):
- lbaas_spec = mock.sentinel.lbaas_spec
- lbaas_spec.type = 'LoadBalancer'
- lbaas_spec.lb_ip = "1.2.3.4"
- lbaas_spec.project_id = 12345678
-
- lbaas_state = mock.sentinel.lbaas_state
- lbaas_state.service_pub_ip_info = None
- loadbalancer = mock.Mock()
- loadbalancer.port_id = 12345678
- lbaas_state.loadbalancer = loadbalancer
- endpoints = mock.sentinel.endpoints
-
- floating_ip = {'floating_ip_address': '1.2.3.5',
- 'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'}
-
- service_pub_ip_info = obj_lbaas.LBaaSPubIp(
- ip_id=floating_ip['id'],
- ip_addr=floating_ip['floating_ip_address'], alloc_method='kk')
-
- m_drv_service_pub_ip = mock.Mock()
- m_drv_service_pub_ip.acquire_service_pub_ip_info.return_value = (
- service_pub_ip_info)
- m_drv_service_pub_ip.associate_pub_ip.return_value = True
-
- m_handler = mock.Mock(spec=h_lbaas.LoadBalancerHandler)
- m_get_lbaas_spec.return_value = lbaas_spec
- m_handler._should_ignore.return_value = False
- m_get_lbaas_state.return_value = lbaas_state
- m_handler._sync_lbaas_members = self._fake_sync_lbaas_members
- m_handler._drv_service_pub_ip = m_drv_service_pub_ip
- m_handler._lb_provider = None
-
- h_lbaas.LoadBalancerHandler.on_present(m_handler, endpoints)
-
- m_get_lbaas_spec.assert_called_once_with(endpoints)
- m_handler._should_ignore.assert_called_once_with(endpoints, lbaas_spec)
- m_get_lbaas_state.assert_called_once_with(endpoints)
- m_set_lbaas_state.assert_called_once_with(
- endpoints, lbaas_state)
- m_handler._update_lb_status.assert_called()
-
- @mock.patch('kuryr_kubernetes.utils.get_lbaas_spec')
- @mock.patch('kuryr_kubernetes.utils.set_lbaas_state')
- @mock.patch('kuryr_kubernetes.utils.get_lbaas_state')
- def test_on_present_rollback(self, m_get_lbaas_state,
- m_set_lbaas_state, m_get_lbaas_spec):
- lbaas_spec = mock.sentinel.lbaas_spec
- lbaas_spec.type = 'ClusterIp'
- lbaas_spec.lb_ip = '1.2.3.4'
- lbaas_spec.project_id = '12345678'
- lbaas_state = mock.sentinel.lbaas_state
- lbaas_state.service_pub_ip_info = None
- loadbalancer = mock.Mock()
- loadbalancer.port_id = 12345678
- lbaas_state.loadbalancer = loadbalancer
- m_drv_service_pub_ip = mock.Mock()
- m_drv_service_pub_ip.acquire_service_pub_ip_info.return_value = None
- m_drv_service_pub_ip.associate_pub_ip.return_value = True
-
- endpoints = mock.sentinel.endpoints
-
- m_handler = mock.Mock(spec=h_lbaas.LoadBalancerHandler)
- m_get_lbaas_spec.return_value = lbaas_spec
- m_handler._should_ignore.return_value = False
- m_get_lbaas_state.return_value = lbaas_state
- m_handler._sync_lbaas_members.return_value = True
- m_set_lbaas_state.side_effect = (
- k_exc.K8sResourceNotFound('ep'))
- m_handler._drv_service_pub_ip = m_drv_service_pub_ip
- m_handler._lb_provider = None
- h_lbaas.LoadBalancerHandler.on_present(m_handler, endpoints)
-
- m_get_lbaas_spec.assert_called_once_with(endpoints)
- m_handler._should_ignore.assert_called_once_with(endpoints, lbaas_spec)
- m_get_lbaas_state.assert_called_once_with(endpoints)
- m_handler._sync_lbaas_members.assert_called_once_with(
- endpoints, lbaas_state, lbaas_spec)
- m_set_lbaas_state.assert_called_once_with(
- endpoints, lbaas_state)
- m_handler.on_deleted.assert_called_once_with(
- endpoints, lbaas_state)
-
- @mock.patch('kuryr_kubernetes.utils.get_lbaas_state')
- @mock.patch('kuryr_kubernetes.objects.lbaas'
- '.LBaaSServiceSpec')
- def test_on_cascade_deleted_lb_service(self, m_svc_spec_ctor,
- m_get_lbaas_state):
- endpoints = mock.sentinel.endpoints
- empty_spec = mock.sentinel.empty_spec
- lbaas_state = mock.Mock()
- lbaas_state.loadbalancer = mock.sentinel.loadbalancer
- lbaas_state.service_pub_ip_info = mock.sentinel.pub_ip
- m_svc_spec_ctor.return_value = empty_spec
-
- m_handler = mock.Mock(spec=h_lbaas.LoadBalancerHandler)
- m_get_lbaas_state.return_value = lbaas_state
- m_handler._drv_lbaas = mock.Mock()
- m_handler._drv_service_pub_ip = mock.Mock()
-
- h_lbaas.LoadBalancerHandler.on_deleted(m_handler, endpoints)
-
- m_handler._drv_lbaas.release_loadbalancer.assert_called_once_with(
- loadbalancer=lbaas_state.loadbalancer)
- m_handler._drv_service_pub_ip.release_pub_ip.assert_called_once_with(
- lbaas_state.service_pub_ip_info)
-
- def test_should_ignore(self):
- endpoints = mock.sentinel.endpoints
- lbaas_spec = mock.sentinel.lbaas_spec
-
- # REVISIT(ivc): ddt?
- m_handler = mock.Mock(spec=h_lbaas.LoadBalancerHandler)
- m_handler._has_pods.return_value = True
- m_handler._svc_handler_annotations_updated.return_value = True
-
- ret = h_lbaas.LoadBalancerHandler._should_ignore(
- m_handler, endpoints, lbaas_spec)
- self.assertEqual(False, ret)
-
- m_handler._has_pods.assert_called_once_with(endpoints)
- m_handler._svc_handler_annotations_updated.assert_called_once_with(
- endpoints, lbaas_spec)
-
- def test_has_pods(self):
- # REVISIT(ivc): ddt?
- endpoints = {'subsets': [
- {},
- {'addresses': []},
- {'addresses': [{'targetRef': {}}]},
- {'addresses': [{'targetRef': {'kind': k_const.K8S_OBJ_POD}}]}
- ]}
-
- m_handler = mock.Mock(spec=h_lbaas.LoadBalancerHandler)
-
- ret = h_lbaas.LoadBalancerHandler._has_pods(m_handler, endpoints)
-
- self.assertEqual(True, ret)
-
- def test_get_pod_subnet(self):
- subnet_id = mock.sentinel.subnet_id
- project_id = mock.sentinel.project_id
- target_ref = {'kind': k_const.K8S_OBJ_POD,
- 'name': 'pod-name',
- 'namespace': 'default'}
- ip = '1.2.3.4'
- m_handler = mock.Mock(spec=h_lbaas.LoadBalancerHandler)
- m_drv_pod_project = mock.Mock()
- m_drv_pod_project.get_project.return_value = project_id
- m_handler._drv_pod_project = m_drv_pod_project
- m_drv_pod_subnets = mock.Mock()
- m_drv_pod_subnets.get_subnets.return_value = {
- subnet_id: osv_network.Network(subnets=osv_subnet.SubnetList(
- objects=[osv_subnet.Subnet(cidr='1.2.3.0/24')]))}
- m_handler._drv_pod_subnets = m_drv_pod_subnets
-
- observed_subnet_id = h_lbaas.LoadBalancerHandler._get_pod_subnet(
- m_handler, target_ref, ip)
-
- self.assertEqual(subnet_id, observed_subnet_id)
-
- def _generate_lbaas_state(self, vip, targets, project_id, subnet_id):
- name = 'namespace/DUMMY_NAME'
- drv = FakeLBaaSDriver()
- lb = drv.ensure_loadbalancer(
- name, project_id, subnet_id, vip, None, 'ClusterIP')
- listeners = {}
- pools = {}
- members = {}
- for ip, (listen_port, target_port) in targets.items():
- lsnr = listeners.setdefault(listen_port, drv.ensure_listener(
- lb, 'TCP', listen_port))
- pool = pools.setdefault(listen_port, drv.ensure_pool(lb, lsnr))
- members.setdefault((ip, listen_port, target_port),
- drv.ensure_member(lb, pool,
- subnet_id, ip,
- target_port, None, None))
- return obj_lbaas.LBaaSState(
- loadbalancer=lb,
- listeners=list(listeners.values()),
- pools=list(pools.values()),
- members=list(members.values()))
-
- def _generate_lbaas_spec(self, vip, targets, project_id,
- subnet_id, prot='TCP', lbaas_type='ClusterIP'):
- return obj_lbaas.LBaaSServiceSpec(
- ip=vip,
- project_id=project_id,
- subnet_id=subnet_id,
- ports=[obj_lbaas.LBaaSPortSpec(name=str(t[0]),
- protocol=prot,
- port=t[0],
- targetPort=t[1])
- for t in targets.values()],
- type=lbaas_type)
-
- def _generate_endpoints(self, targets):
- def _target_to_port(item):
- _, (listen_port, target_port) = item
- return {'port': target_port, 'name': str(listen_port)}
- port_with_addrs = [
- (p, [e[0] for e in grp])
- for p, grp in itertools.groupby(
- sorted(targets.items()), _target_to_port)]
- return {
- 'metadata': {
- 'name': 'ep_name',
- 'namespace': 'default'
- },
- 'subsets': [
- {
- 'addresses': [
- {
- 'ip': ip,
- 'targetRef': {
- 'kind': k_const.K8S_OBJ_POD,
- 'name': ip,
- 'namespace': 'default'
- }
- }
- for ip in addrs
- ],
- 'ports': [port]
- }
- for port, addrs in port_with_addrs
- ]
- }
-
- def _sync_lbaas_members_impl(self, m_get_drv_lbaas, m_get_drv_project,
- m_get_drv_subnets, subnet_id, project_id,
- endpoints, state, spec):
- m_drv_lbaas = mock.Mock(wraps=FakeLBaaSDriver())
- m_drv_project = mock.Mock()
- m_drv_project.get_project.return_value = project_id
- m_drv_subnets = mock.Mock()
- m_drv_subnets.get_subnets.return_value = {
- subnet_id: mock.sentinel.subnet}
- m_get_drv_lbaas.return_value = m_drv_lbaas
- m_get_drv_project.return_value = m_drv_project
- m_get_drv_subnets.return_value = m_drv_subnets
-
- handler = h_lbaas.LoadBalancerHandler()
-
- with mock.patch.object(handler, '_get_pod_subnet') as m_get_pod_subnet:
- m_get_pod_subnet.return_value = subnet_id
- handler._sync_lbaas_members(endpoints, state, spec)
-
- lsnrs = {lsnr.id: lsnr for lsnr in state.listeners}
- pools = {pool.id: pool for pool in state.pools}
- observed_targets = sorted(
- (str(member.ip), (
- lsnrs[pools[member.pool_id].listener_id].port,
- member.port))
- for member in state.members)
- return observed_targets
-
- @mock.patch('kuryr_kubernetes.controller.handlers.lbaas'
- '.LoadBalancerHandler._cleanup_leftover_lbaas')
- @mock.patch('kuryr_kubernetes.controller.handlers.lbaas.'
- 'LoadBalancerHandler._sync_lbaas_sgs')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.PodSubnetsDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.PodProjectDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.LBaaSDriver.get_instance')
- def test_sync_lbaas_members(self, m_get_drv_lbaas, m_get_drv_project,
- m_get_drv_subnets, m_sync_lbaas_sgs,
- m_cleanup_leftover_lbaas):
- # REVISIT(ivc): test methods separately and verify ensure/release
- project_id = str(uuid.uuid4())
- subnet_id = str(uuid.uuid4())
- current_ip = '1.1.1.1'
- current_targets = {
- '1.1.1.101': (1001, 10001),
- '1.1.1.111': (1001, 10001),
- '1.1.1.201': (2001, 20001)}
- expected_ip = '2.2.2.2'
- expected_targets = {
- '2.2.2.101': (1201, 12001),
- '2.2.2.111': (1201, 12001),
- '2.2.2.201': (2201, 22001)}
- endpoints = self._generate_endpoints(expected_targets)
- state = self._generate_lbaas_state(
- current_ip, current_targets, project_id, subnet_id)
- spec = self._generate_lbaas_spec(expected_ip, expected_targets,
- project_id, subnet_id)
-
- observed_targets = self._sync_lbaas_members_impl(
- m_get_drv_lbaas, m_get_drv_project, m_get_drv_subnets,
- subnet_id, project_id, endpoints, state, spec)
-
- self.assertEqual(sorted(expected_targets.items()), observed_targets)
- self.assertEqual(expected_ip, str(state.loadbalancer.ip))
-
- @mock.patch('kuryr_kubernetes.controller.handlers.lbaas'
- '.LoadBalancerHandler._cleanup_leftover_lbaas')
- @mock.patch('kuryr_kubernetes.controller.handlers.lbaas.'
- 'LoadBalancerHandler._sync_lbaas_sgs')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.PodSubnetsDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.PodProjectDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.LBaaSDriver.get_instance')
- def test_sync_lbaas_members_udp(self, m_get_drv_lbaas,
- m_get_drv_project, m_get_drv_subnets,
- m_sync_lbaas_sgs,
- m_cleanup_leftover_lbaas):
- # REVISIT(ivc): test methods separately and verify ensure/release
- project_id = str(uuid.uuid4())
- subnet_id = str(uuid.uuid4())
- current_ip = '1.1.1.1'
- current_targets = {
- '1.1.1.101': (1001, 10001),
- '1.1.1.111': (1001, 10001),
- '1.1.1.201': (2001, 20001)}
- expected_ip = '2.2.2.2'
- expected_targets = {
- '2.2.2.101': (1201, 12001),
- '2.2.2.111': (1201, 12001),
- '2.2.2.201': (2201, 22001)}
- endpoints = self._generate_endpoints(expected_targets)
- state = self._generate_lbaas_state(
- current_ip, current_targets, project_id, subnet_id)
- spec = self._generate_lbaas_spec(expected_ip, expected_targets,
- project_id, subnet_id, 'UDP')
-
- observed_targets = self._sync_lbaas_members_impl(
- m_get_drv_lbaas, m_get_drv_project, m_get_drv_subnets,
- subnet_id, project_id, endpoints, state, spec)
-
- self.assertEqual([], observed_targets)
- self.assertEqual(expected_ip, str(state.loadbalancer.ip))
-
- @mock.patch('kuryr_kubernetes.controller.handlers.lbaas'
- '.LoadBalancerHandler._cleanup_leftover_lbaas')
- @mock.patch('kuryr_kubernetes.controller.handlers.lbaas.'
- 'LoadBalancerHandler._sync_lbaas_sgs')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.PodSubnetsDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.PodProjectDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.LBaaSDriver.get_instance')
- def test_sync_lbaas_members_svc_listener_port_edit(
- self, m_get_drv_lbaas, m_get_drv_project, m_get_drv_subnets,
- m_sync_lbaas_sgs, m_cleanup_leftover_lbaas):
- # REVISIT(ivc): test methods separately and verify ensure/release
- project_id = str(uuid.uuid4())
- subnet_id = str(uuid.uuid4())
- current_ip = '1.1.1.1'
- current_targets = {
- '1.1.1.101': (1001, 10001)}
- expected_ip = '1.1.1.1'
- expected_targets = {
- '1.1.1.101': (1201, 10001)}
- endpoints = self._generate_endpoints(expected_targets)
- state = self._generate_lbaas_state(
- current_ip, current_targets, project_id, subnet_id)
- spec = self._generate_lbaas_spec(expected_ip, expected_targets,
- project_id, subnet_id)
-
- m_drv_lbaas = mock.Mock(wraps=FakeLBaaSDriver())
- m_drv_project = mock.Mock()
- m_drv_project.get_project.return_value = project_id
- m_drv_subnets = mock.Mock()
- m_drv_subnets.get_subnets.return_value = {
- subnet_id: mock.sentinel.subnet}
- m_get_drv_lbaas.return_value = m_drv_lbaas
- m_get_drv_project.return_value = m_drv_project
- m_get_drv_subnets.return_value = m_drv_subnets
-
- handler = h_lbaas.LoadBalancerHandler()
-
- with mock.patch.object(handler, '_get_pod_subnet') as m_get_pod_subnet:
- m_get_pod_subnet.return_value = subnet_id
- handler._sync_lbaas_members(endpoints, state, spec)
-
- self.assertEqual(expected_ip, str(state.loadbalancer.ip))
- m_drv_lbaas.release_pool.assert_called_once()
-
- def test_get_lbaas_spec(self):
- self.skipTest("skipping until generalised annotation handling is "
- "implemented")
-
- def test_get_lbaas_state(self):
- self.skipTest("skipping until generalised annotation handling is "
- "implemented")
-
- def test_set_lbaas_state(self):
- self.skipTest("skipping until generalised annotation handling is "
- "implemented")
-
- @mock.patch('kuryr_kubernetes.controller.handlers.lbaas'
- '.LoadBalancerHandler._cleanup_leftover_lbaas')
- @mock.patch('kuryr_kubernetes.controller.handlers.lbaas.'
- 'LoadBalancerHandler._sync_lbaas_sgs')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.PodSubnetsDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.PodProjectDriver.get_instance')
- @mock.patch('kuryr_kubernetes.controller.drivers.base'
- '.LBaaSDriver.get_instance')
- def test_add_new_members_udp(self, m_get_drv_lbaas,
- m_get_drv_project, m_get_drv_subnets,
- m_sync_lbaas_sgs, m_cleanup_leftover_lbaas):
- project_id = str(uuid.uuid4())
- subnet_id = str(uuid.uuid4())
- current_ip = '1.1.1.1'
- current_targets = {
- '1.1.1.101': (1001, 10001),
- '1.1.1.111': (1001, 10001),
- '1.1.1.201': (2001, 20001)}
- expected_ip = '2.2.2.2'
- expected_targets = {
- '2.2.2.101': (1201, 12001),
- '2.2.2.111': (1201, 12001),
- '2.2.2.201': (2201, 22001)}
- endpoints = self._generate_endpoints(expected_targets)
- state = self._generate_lbaas_state(
- current_ip, current_targets, project_id, subnet_id)
- spec = self._generate_lbaas_spec(expected_ip, expected_targets,
- project_id, subnet_id, 'UDP')
-
- m_drv_lbaas = mock.Mock(wraps=FakeLBaaSDriver())
- m_drv_project = mock.Mock()
- m_drv_project.get_project.return_value = project_id
- m_drv_subnets = mock.Mock()
- m_drv_subnets.get_subnets.return_value = {
- subnet_id: mock.sentinel.subnet}
- m_get_drv_lbaas.return_value = m_drv_lbaas
- m_get_drv_project.return_value = m_drv_project
- m_get_drv_subnets.return_value = m_drv_subnets
-
- handler = h_lbaas.LoadBalancerHandler()
- member_added = handler._add_new_members(endpoints, state, spec)
-
- self.assertEqual(member_added, False)
- m_drv_lbaas.ensure_member.assert_not_called()
diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_loadbalancer.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_loadbalancer.py
new file mode 100644
index 000000000..4f6140e1a
--- /dev/null
+++ b/kuryr_kubernetes/tests/unit/controller/handlers/test_loadbalancer.py
@@ -0,0 +1,525 @@
+# Copyright (c) 2016 Mirantis, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+import uuid
+
+import os_vif.objects.network as osv_network
+import os_vif.objects.subnet as osv_subnet
+
+from kuryr_kubernetes import constants as k_const
+from kuryr_kubernetes.controller.drivers import base as drv_base
+from kuryr_kubernetes.controller.handlers import loadbalancer as h_lb
+from kuryr_kubernetes.tests import base as test_base
+from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix
+
+_SUPPORTED_LISTENER_PROT = ('HTTP', 'HTTPS', 'TCP')
+
+
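+# Sample KuryrLoadBalancer CRD used throughout these tests: ``spec`` carries
+# the data generated from the Service/Endpoints, while ``status`` holds the
+# Octavia resources (load balancer, listeners, pools, members) built from it.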
+def get_lb_crd():
+ return {
+ "metadata": {
+ "creationTimestamp": "2020-07-28T13:13:30Z",
+ "finalizers": [
+ ""
+ ],
+ "generation": 6,
+ "name": "test",
+ "namespace": "default",
+ "resourceVersion": "111871",
+ "selfLink": "test",
+ "uid": "584fe3ea-04dd-43f7-be2f-713e861694ec"
+ },
+ "spec": {
+ "ip": "1.2.3.4",
+ "ports": [
+ {
+ "port": 1,
+ "protocol": "TCP",
+ "targetPort": "1"
+ }
+ ],
+ "project_id": "1023456789120",
+ "security_groups_ids": [
+ "1d134e68-5653-4192-bda2-4214319af799",
+ "31d7b8c2-75f1-4125-9565-8c15c5cf046c"
+ ],
+ "subnet_id": "123456789120",
+ "subsets": [
+ {
+ "addresses": [
+ {
+ "ip": "1.1.1.1",
+ "nodeName": "sarka-devstack",
+ "targetRef": {
+ "kind": "Pod",
+ "name": "test-f87976f9c-thjbk",
+ "namespace": "default",
+ "resourceVersion": "111701",
+ "uid": "10234567800"
+ }
+ }
+ ],
+ "ports": [
+ {
+ "port": 2,
+ "protocol": "TCP"
+ }
+ ]
+ }
+ ],
+ "type": "LoadBalancer"
+ },
+ "status": {
+ "listeners": [
+ {
+ "id": "012345678912",
+ "loadbalancer_id": "01234567890",
+ "name": "default/test:TCP:80",
+ "port": 1,
+ "project_id": "12345678912",
+ "protocol": "TCP"
+ }
+ ],
+ "loadbalancer": {
+ "id": "01234567890",
+ "ip": "1.2.3.4",
+ "name": "default/test",
+ "port_id": "1023456789120",
+ "project_id": "12345678912",
+ "provider": "amphora",
+ "security_groups": [
+ "1d134e68-5653-4192-bda2-4214319af799",
+ "31d7b8c2-75f1-4125-9565-8c15c5cf046c"
+ ],
+ "subnet_id": "123456789120"
+ },
+ "members": [
+ {
+ "id": "0123456789",
+ "ip": "1.1.1.1",
+ "name": "default/test-f87976f9c-thjbk:8080",
+ "pool_id": "1234567890",
+ "port": 2,
+ "project_id": "12345678912",
+ "subnet_id": "123456789120"
+ }
+ ],
+ "pools": [
+ {
+ "id": "1234567890",
+ "listener_id": "012345678912",
+ "loadbalancer_id": "01234567890",
+ "name": "default/test:TCP:80",
+ "project_id": "12345678912",
+ "protocol": "TCP"
+ }
+ ],
+            'service_pub_ip_info': {
+                'ip_id': 'ec29d641-fec4-4f67-928a-124a76b3a888',
+                'ip_addr': '1.2.3.5',
+                'alloc_method': 'kk'
+            }
+ }
+ }
+
+
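+# Minimal fake of the LBaaS driver interface. Unlike the objects-based fake
+# removed from test_lbaas.py, it returns plain dicts, matching the dict-based
+# representation the KuryrLoadBalancer handler operates on.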
+class FakeLBaaSDriver(drv_base.LBaaSDriver):
+
+ def ensure_loadbalancer(self, name, project_id, subnet_id, ip,
+ security_groups_ids, service_type, provider=None):
+
+ return {
+ 'name': name,
+ 'project_id': project_id,
+ 'subnet_id': subnet_id,
+ 'ip': ip,
+ 'id': str(uuid.uuid4()),
+ 'provider': provider
+ }
+
+ def ensure_listener(self, loadbalancer, protocol, port,
+ service_type='ClusterIP'):
+ if protocol not in _SUPPORTED_LISTENER_PROT:
+ return None
+
+ name = "%s:%s:%s" % (loadbalancer['name'], protocol, port)
+ return {
+ 'name': name,
+ 'project_id': loadbalancer['project_id'],
+ 'loadbalancer_id': loadbalancer['id'],
+ 'protocol': protocol,
+ 'port': port,
+ 'id': str(uuid.uuid4())
+ }
+
+ def ensure_pool(self, loadbalancer, listener):
+ return {
+ 'name': listener['name'],
+ 'project_id': loadbalancer['project_id'],
+ 'loadbalancer_id': loadbalancer['id'],
+ 'listener_id': listener['id'],
+ 'protocol': listener['protocol'],
+ 'id': str(uuid.uuid4())
+ }
+
+    def ensure_member(self, loadbalancer, pool, subnet_id, ip, port,
+                      target_ref_namespace, target_ref_name,
+                      listener_port=None):
+ name = "%s:%s:%s" % (loadbalancer['name'], ip, port)
+ return {
+ 'name': name,
+ 'project_id': pool['project_id'],
+ 'pool_id': pool['id'],
+ 'subnet_id': subnet_id,
+ 'ip': ip,
+ 'port': port,
+ 'id': str(uuid.uuid4())
+ }
+
+
+class TestKuryrLoadBalancerHandler(test_base.TestCase):
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.'
+ 'ServiceProjectDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.'
+ 'ServiceSecurityGroupsDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.config.CONF')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.ServicePubIpDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.PodSubnetsDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.PodProjectDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.LBaaSDriver.get_instance')
+ def test_init(self, m_get_drv_lbaas, m_get_drv_project,
+ m_get_drv_subnets, m_get_drv_service_pub_ip, m_cfg,
+ m_get_svc_sg_drv, m_get_svc_drv_project):
+ m_get_drv_lbaas.return_value = mock.sentinel.drv_lbaas
+ m_get_drv_project.return_value = mock.sentinel.drv_project
+ m_get_drv_subnets.return_value = mock.sentinel.drv_subnets
+ m_get_drv_service_pub_ip.return_value = mock.sentinel.drv_lb_ip
+ m_get_svc_drv_project.return_value = mock.sentinel.drv_svc_project
+ m_get_svc_sg_drv.return_value = mock.sentinel.drv_sg
+ m_cfg.kubernetes.endpoints_driver_octavia_provider = 'default'
+ handler = h_lb.KuryrLoadBalancerHandler()
+
+ self.assertEqual(mock.sentinel.drv_lbaas, handler._drv_lbaas)
+ self.assertEqual(mock.sentinel.drv_project, handler._drv_pod_project)
+ self.assertEqual(mock.sentinel.drv_subnets, handler._drv_pod_subnets)
+ self.assertEqual(mock.sentinel.drv_lb_ip, handler._drv_service_pub_ip)
+ self.assertIsNone(handler._lb_provider)
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.'
+ 'ServiceProjectDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base.'
+ 'ServiceSecurityGroupsDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.config.CONF')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.ServicePubIpDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.PodSubnetsDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.PodProjectDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.LBaaSDriver.get_instance')
+ def test_init_provider_ovn(self, m_get_drv_lbaas, m_get_drv_project,
+ m_get_drv_subnets, m_get_drv_service_pub_ip,
+ m_cfg,
+ m_get_svc_sg_drv, m_get_svc_drv_project):
+ m_get_drv_lbaas.return_value = mock.sentinel.drv_lbaas
+ m_get_drv_project.return_value = mock.sentinel.drv_project
+ m_get_drv_subnets.return_value = mock.sentinel.drv_subnets
+ m_get_drv_service_pub_ip.return_value = mock.sentinel.drv_lb_ip
+ m_get_svc_drv_project.return_value = mock.sentinel.drv_svc_project
+ m_get_svc_sg_drv.return_value = mock.sentinel.drv_sg
+ m_cfg.kubernetes.endpoints_driver_octavia_provider = 'ovn'
+ handler = h_lb.KuryrLoadBalancerHandler()
+
+ self.assertEqual(mock.sentinel.drv_lbaas, handler._drv_lbaas)
+ self.assertEqual(mock.sentinel.drv_project, handler._drv_pod_project)
+ self.assertEqual(mock.sentinel.drv_subnets, handler._drv_pod_subnets)
+ self.assertEqual(mock.sentinel.drv_lb_ip, handler._drv_service_pub_ip)
+ self.assertEqual('ovn', handler._lb_provider)
+
+ def test_on_present(self):
+ m_drv_service_pub_ip = mock.Mock()
+ m_drv_service_pub_ip.acquire_service_pub_ip_info.return_value = None
+ m_drv_service_pub_ip.associate_pub_ip.return_value = True
+
+ m_handler = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler)
+ m_handler._should_ignore.return_value = False
+ m_handler._sync_lbaas_members.return_value = True
+ m_handler._drv_service_pub_ip = m_drv_service_pub_ip
+
+ h_lb.KuryrLoadBalancerHandler.on_present(m_handler, get_lb_crd())
+
+ m_handler._should_ignore.assert_called_once_with(get_lb_crd())
+ m_handler._sync_lbaas_members.assert_called_once_with(
+ get_lb_crd())
+
+ def _fake_sync_lbaas_members(self, crd):
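+ # Stand-in for _sync_lbaas_members(): pretend the sync succeeded and
+ # populate the CRD status the way the real handler would.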
+ loadbalancer = {
+ "id": "01234567890",
+ "ip": "1.2.3.4",
+ "name": "default/test",
+ "port_id": "1023456789120",
+ "project_id": "12345678912",
+ "provider": "amphora",
+ "security_groups": [
+ "1d134e68-5653-4192-bda2-4214319af799",
+ "31d7b8c2-75f1-4125-9565-8c15c5cf046c"
+ ],
+ "subnet_id": "123456789120"
+ }
+ crd['status']['loadbalancer'] = loadbalancer
+ crd['status']['service_pub_ip_info'] = None
+ return True
+
+ def test_on_present_loadbalancer_service(self):
+ floating_ip = {'floating_ip_address': '1.2.3.5',
+ 'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'}
+
+ service_pub_ip_info = {
+ 'ip_id': floating_ip['id'],
+ 'ip_addr': floating_ip['floating_ip_address'],
+ 'alloc_method': 'kk'
+ }
+ crd = get_lb_crd()
+ m_drv_service_pub_ip = mock.Mock()
+ m_drv_service_pub_ip.acquire_service_pub_ip_info.return_value = (
+ service_pub_ip_info)
+ m_drv_service_pub_ip.associate_pub_ip.return_value = True
+
+ h = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler)
+ h._should_ignore.return_value = False
+ h._sync_lbaas_members.return_value = self._fake_sync_lbaas_members(crd)
+ h._drv_service_pub_ip = m_drv_service_pub_ip
+ self.useFixture(k_fix.MockK8sClient())
+ h_lb.KuryrLoadBalancerHandler.on_present(h, crd)
+ h._should_ignore.assert_called_once_with(crd)
+ h._update_lb_status.assert_called()
+
+ def test_on_present_rollback(self):
+ m_drv_service_pub_ip = mock.Mock()
+ m_drv_service_pub_ip.acquire_service_pub_ip_info.return_value = None
+ m_drv_service_pub_ip.associate_pub_ip.return_value = True
+
+ m_handler = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler)
+ m_handler._should_ignore.return_value = False
+ m_handler._sync_lbaas_members.return_value = True
+ m_handler._drv_service_pub_ip = m_drv_service_pub_ip
+ h_lb.KuryrLoadBalancerHandler.on_present(m_handler, get_lb_crd())
+
+ m_handler._should_ignore.assert_called_once_with(get_lb_crd())
+ m_handler._sync_lbaas_members.assert_called_once_with(
+ get_lb_crd())
+
+ def test_on_cascade_deleted_lb_service(self):
+ m_handler = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler)
+ m_handler._drv_lbaas = mock.Mock()
+ m_handler._drv_service_pub_ip = mock.Mock()
+ crd = get_lb_crd()
+ m_handler._drv_lbaas.release_loadbalancer(
+ loadbalancer=crd['status']['loadbalancer'])
+ m_handler._drv_service_pub_ip.release_pub_ip(
+ crd['status']['service_pub_ip_info'])
+
+ def test_should_ignore(self):
+ m_handler = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler)
+ m_handler._has_pods.return_value = True
+
+ ret = h_lb.KuryrLoadBalancerHandler._should_ignore(
+ m_handler, get_lb_crd())
+ self.assertFalse(ret)
+
+ m_handler._has_pods.assert_called_once_with(get_lb_crd())
+
+ def test_has_pods(self):
+ crd = get_lb_crd()
+ m_handler = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler)
+
+ ret = h_lb.KuryrLoadBalancerHandler._has_pods(m_handler, crd)
+
+ self.assertTrue(ret)
+
+ def test_get_pod_subnet(self):
+ subnet_id = mock.sentinel.subnet_id
+ project_id = mock.sentinel.project_id
+ target_ref = {'kind': k_const.K8S_OBJ_POD,
+ 'name': 'pod-name',
+ 'namespace': 'default'}
+ ip = '1.2.3.4'
+ m_handler = mock.Mock(spec=h_lb.KuryrLoadBalancerHandler)
+ m_drv_pod_project = mock.Mock()
+ m_drv_pod_project.get_project.return_value = project_id
+ m_handler._drv_pod_project = m_drv_pod_project
+ m_drv_pod_subnets = mock.Mock()
+ m_drv_pod_subnets.get_subnets.return_value = {
+ subnet_id: osv_network.Network(subnets=osv_subnet.SubnetList(
+ objects=[osv_subnet.Subnet(cidr='1.2.3.0/24')]))}
+ m_handler._drv_pod_subnets = m_drv_pod_subnets
+
+ observed_subnet_id = h_lb.KuryrLoadBalancerHandler._get_pod_subnet(
+ m_handler, target_ref, ip)
+
+ self.assertEqual(subnet_id, observed_subnet_id)
+
+ def _sync_lbaas_members_impl(self, m_get_drv_lbaas, m_get_drv_project,
+ m_get_drv_subnets, subnet_id, project_id,
+ crd):
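+ # Shared helper: wires FakeLBaaSDriver into a real handler, runs
+ # _sync_lbaas_members() and returns the observed
+ # (member_ip, (listener_port, member_port)) targets from the CRD status.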
+ m_drv_lbaas = mock.Mock(wraps=FakeLBaaSDriver())
+ m_drv_project = mock.Mock()
+ m_drv_project.get_project.return_value = project_id
+ m_drv_subnets = mock.Mock()
+ m_drv_subnets.get_subnets.return_value = {
+ subnet_id: mock.sentinel.subnet}
+ m_get_drv_lbaas.return_value = m_drv_lbaas
+ m_get_drv_project.return_value = m_drv_project
+ m_get_drv_subnets.return_value = m_drv_subnets
+
+ handler = h_lb.KuryrLoadBalancerHandler()
+
+ with mock.patch.object(handler, '_get_pod_subnet') as m_get_pod_subnet:
+ m_get_pod_subnet.return_value = subnet_id
+ handler._sync_lbaas_members(crd)
+
+ lsnrs = {lsnr['id']: lsnr for lsnr in crd['status']['listeners']}
+ pools = {pool['id']: pool for pool in crd['status']['pools']}
+ observed_targets = sorted(
+ (str(member['ip']), (
+ lsnrs[pools[member['pool_id']]['listener_id']]['port'],
+ member['port']))
+ for member in crd['status']['members'])
+ return observed_targets
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.PodSubnetsDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.PodProjectDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.LBaaSDriver.get_instance')
+ def test_sync_lbaas_members(self, m_get_drv_lbaas, m_get_drv_project,
+ m_get_drv_subnets):
+ # REVISIT(ivc): test methods separately and verify ensure/release
+ project_id = str(uuid.uuid4())
+ subnet_id = str(uuid.uuid4())
+ expected_ip = '1.2.3.4'
+ expected_targets = {
+ '1.1.1.1': (1, 2)}
+ crd = get_lb_crd()
+
+ observed_targets = self._sync_lbaas_members_impl(
+ m_get_drv_lbaas, m_get_drv_project, m_get_drv_subnets,
+ subnet_id, project_id, crd)
+
+ self.assertEqual(sorted(expected_targets.items()), observed_targets)
+ self.assertEqual(expected_ip, str(crd['status']['loadbalancer']['ip']))
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.PodSubnetsDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.PodProjectDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.LBaaSDriver.get_instance')
+ def test_sync_lbaas_members_udp(self, m_get_drv_lbaas,
+ m_get_drv_project, m_get_drv_subnets):
+ # REVISIT(ivc): test methods separately and verify ensure/release
+ project_id = str(uuid.uuid4())
+ subnet_id = str(uuid.uuid4())
+ expected_ip = "1.2.3.4"
+ expected_targets = {
+ '1.1.1.1': (1, 2)}
+
+ crd = get_lb_crd()
+
+ observed_targets = self._sync_lbaas_members_impl(
+ m_get_drv_lbaas, m_get_drv_project, m_get_drv_subnets,
+ subnet_id, project_id, crd)
+
+ self.assertEqual(sorted(expected_targets.items()), observed_targets)
+ self.assertEqual(expected_ip, str(crd['status']['loadbalancer']['ip']))
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.PodSubnetsDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.PodProjectDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.LBaaSDriver.get_instance')
+ def test_sync_lbaas_members_svc_listener_port_edit(
+ self, m_get_drv_lbaas, m_get_drv_project, m_get_drv_subnets):
+ # REVISIT(ivc): test methods separately and verify ensure/release
+ project_id = str(uuid.uuid4())
+ subnet_id = str(uuid.uuid4())
+ expected_ip = '1.2.3.4'
+ crd = get_lb_crd()
+
+ m_drv_lbaas = mock.Mock(wraps=FakeLBaaSDriver())
+ m_drv_project = mock.Mock()
+ m_drv_project.get_project.return_value = project_id
+ m_drv_subnets = mock.Mock()
+ m_drv_subnets.get_subnets.return_value = {
+ subnet_id: mock.sentinel.subnet}
+ m_get_drv_lbaas.return_value = m_drv_lbaas
+ m_get_drv_project.return_value = m_drv_project
+ m_get_drv_subnets.return_value = m_drv_subnets
+
+ handler = h_lb.KuryrLoadBalancerHandler()
+
+ with mock.patch.object(handler, '_get_pod_subnet') as m_get_pod_subnet:
+ m_get_pod_subnet.return_value = subnet_id
+ handler._sync_lbaas_members(crd)
+
+ self.assertEqual(expected_ip, str(crd['status']['loadbalancer']['ip']))
+
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.PodSubnetsDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.PodProjectDriver.get_instance')
+ @mock.patch('kuryr_kubernetes.controller.drivers.base'
+ '.LBaaSDriver.get_instance')
+ def test_add_new_members_udp(self, m_get_drv_lbaas,
+ m_get_drv_project, m_get_drv_subnets):
+ project_id = str(uuid.uuid4())
+ subnet_id = str(uuid.uuid4())
+ crd = get_lb_crd()
+
+ m_drv_lbaas = mock.Mock(wraps=FakeLBaaSDriver())
+ m_drv_project = mock.Mock()
+ m_drv_project.get_project.return_value = project_id
+ m_drv_subnets = mock.Mock()
+ m_drv_subnets.get_subnets.return_value = {
+ subnet_id: mock.sentinel.subnet}
+ m_get_drv_lbaas.return_value = m_drv_lbaas
+ m_get_drv_project.return_value = m_drv_project
+ m_get_drv_subnets.return_value = m_drv_subnets
+
+ handler = h_lb.KuryrLoadBalancerHandler()
+ member_added = handler._add_new_members(crd)
+
+ self.assertFalse(member_added)
+ m_drv_lbaas.ensure_member.assert_not_called()
diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_pod_label.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_pod_label.py
index 1d1a4c353..548b33cb4 100644
--- a/kuryr_kubernetes/tests/unit/controller/handlers/test_pod_label.py
+++ b/kuryr_kubernetes/tests/unit/controller/handlers/test_pod_label.py
@@ -47,9 +47,9 @@ def setUp(self):
self._get_project = self._handler._drv_project.get_project
self._get_security_groups = self._handler._drv_sg.get_security_groups
self._set_vif_driver = self._handler._drv_vif_pool.set_vif_driver
- self._get_pod_labels = self._handler._get_pod_labels
- self._set_pod_labels = self._handler._set_pod_labels
- self._has_pod_state = self._handler._has_pod_state
+ self._get_pod_info = self._handler._get_pod_info
+ self._set_pod_info = self._handler._set_pod_info
+ self._has_vifs = self._handler._has_vifs
self._update_vif_sgs = self._handler._drv_vif_pool.update_vif_sgs
self._get_project.return_value = self._project_id
@@ -80,46 +80,48 @@ def test_init(self, m_get_lbaas_driver, m_get_project_driver,
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
def test_on_present(self, m_get_services):
m_get_services.return_value = {"items": []}
- self._has_pod_state.return_value = True
- self._get_pod_labels.return_value = {'test1': 'test'}
+ self._has_vifs.return_value = True
+ self._get_pod_info.return_value = ({'test1': 'test'}, '192.168.0.1')
p_label.PodLabelHandler.on_present(self._handler, self._pod)
- self._has_pod_state.assert_called_once_with(self._pod)
- self._get_pod_labels.assert_called_once_with(self._pod)
+ self._has_vifs.assert_called_once_with(self._pod)
+ self._get_pod_info.assert_called_once_with(self._pod)
self._get_project.assert_called_once()
self._get_security_groups.assert_called_once()
self._update_vif_sgs.assert_called_once_with(self._pod, [self._sg_id])
- self._set_pod_labels.assert_called_once_with(self._pod, None)
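+ # The fixture pod carries no labels and no podIP, so the stored info
+ # is reset to (None, None).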
+ self._set_pod_info.assert_called_once_with(self._pod, (None, None))
def test_on_present_no_state(self):
- self._has_pod_state.return_value = False
+ self._has_vifs.return_value = False
resp = p_label.PodLabelHandler.on_present(self._handler, self._pod)
self.assertIsNone(resp)
- self._has_pod_state.assert_called_once_with(self._pod)
- self._get_pod_labels.assert_not_called()
- self._set_pod_labels.assert_not_called()
+ self._has_vifs.assert_called_once_with(self._pod)
+ self._get_pod_info.assert_not_called()
+ self._set_pod_info.assert_not_called()
- def test_on_present_no_labels(self):
- self._has_pod_state.return_value = True
- self._get_pod_labels.return_value = None
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
+ def test_on_present_no_labels(self, m_get_services):
+ self._has_vifs.return_value = True
+ self._get_pod_info.return_value = (None, None)
p_label.PodLabelHandler.on_present(self._handler, self._pod)
- self._has_pod_state.assert_called_once_with(self._pod)
- self._get_pod_labels.assert_called_once_with(self._pod)
- self._set_pod_labels.assert_not_called()
+ self._has_vifs.assert_called_once_with(self._pod)
+ self._get_pod_info.assert_called_once_with(self._pod)
+ self._set_pod_info.assert_not_called()
def test_on_present_no_changes(self):
- self._has_pod_state.return_value = True
+ self._has_vifs.return_value = True
pod_with_label = self._pod.copy()
pod_with_label['metadata']['labels'] = {'test1': 'test'}
- self._get_pod_labels.return_value = {'test1': 'test'}
+ pod_with_label['status']['podIP'] = '192.168.0.1'
+ self._get_pod_info.return_value = ({'test1': 'test'}, '192.168.0.1')
p_label.PodLabelHandler.on_present(self._handler, pod_with_label)
- self._has_pod_state.assert_called_once_with(pod_with_label)
- self._get_pod_labels.assert_called_once_with(pod_with_label)
- self._set_pod_labels.assert_not_called()
+ self._has_vifs.assert_called_once_with(pod_with_label)
+ self._get_pod_info.assert_called_once_with(pod_with_label)
+ self._set_pod_info.assert_not_called()
diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_policy.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_policy.py
index 0dbb01cee..57e6bf3eb 100644
--- a/kuryr_kubernetes/tests/unit/controller/handlers/test_policy.py
+++ b/kuryr_kubernetes/tests/unit/controller/handlers/test_policy.py
@@ -21,14 +21,15 @@
class TestPolicyHandler(test_base.TestCase):
- def setUp(self):
+ @mock.patch.object(drivers.NetworkPolicyDriver, 'get_instance')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ def setUp(self, m_get_k8s, m_get_np):
super(TestPolicyHandler, self).setUp()
self._project_id = mock.sentinel.project_id
self._policy_name = 'np-test'
self._policy_uid = mock.sentinel.policy_uid
self._policy_link = mock.sentinel.policy_link
- self._pod_sg = mock.sentinel.pod_sg
self._policy = {
'apiVersion': 'networking.k8s.io/v1',
@@ -50,198 +51,31 @@ def setUp(self):
}
}
- self._handler = mock.MagicMock(spec=policy.NetworkPolicyHandler)
-
- self._handler._drv_project = mock.Mock(
- spec=drivers.NetworkPolicyProjectDriver)
- self._handler._drv_policy = mock.MagicMock(
- spec=drivers.NetworkPolicyDriver)
- self._handler._drv_pod_sg = mock.Mock(
- spec=drivers.PodSecurityGroupsDriver)
- self._handler._drv_svc_sg = mock.Mock(
- spec=drivers.ServiceSecurityGroupsDriver)
- self._handler._drv_vif_pool = mock.MagicMock(
- spec=drivers.VIFPoolDriver)
- self._handler._drv_lbaas = mock.Mock(
- spec=drivers.LBaaSDriver)
-
- self._get_project = self._handler._drv_project.get_project
- self._get_project.return_value = self._project_id
- self._get_security_groups = (
- self._handler._drv_pod_sg.get_security_groups)
- self._set_vifs_driver = self._handler._drv_vif_pool.set_vif_driver
- self._set_vifs_driver.return_value = mock.Mock(
- spec=drivers.PodVIFDriver)
- self._update_vif_sgs = self._handler._drv_vif_pool.update_vif_sgs
- self._update_vif_sgs.return_value = None
- self._update_lbaas_sg = self._handler._drv_lbaas.update_lbaas_sg
- self._update_lbaas_sg.return_value = None
- self._remove_sg = self._handler._drv_vif_pool.remove_sg_from_pools
- self._remove_sg.return_value = None
-
- def _get_knp_obj(self):
- knp_obj = {
- 'apiVersion': 'openstack.org/v1',
- 'kind': 'KuryrNetPolicy',
- 'metadata': {
- 'name': 'np-test-network-policy',
- 'namespace': 'test-1'
- },
- 'spec': {
- 'securityGroupId': 'c1ac16f5-e198-4628-9d84-253c6001be8e',
- 'securityGroupName': 'sg-test-network-policy'
- }}
- return knp_obj
-
- @mock.patch.object(drivers.LBaaSDriver, 'get_instance')
- @mock.patch.object(drivers.ServiceSecurityGroupsDriver, 'get_instance')
- @mock.patch.object(drivers.PodSecurityGroupsDriver, 'get_instance')
- @mock.patch.object(drivers.VIFPoolDriver, 'get_instance')
- @mock.patch.object(drivers.NetworkPolicyDriver, 'get_instance')
- @mock.patch.object(drivers.NetworkPolicyProjectDriver, 'get_instance')
- def test_init(self, m_get_project_driver, m_get_policy_driver,
- m_get_vif_driver, m_get_pod_sg_driver, m_get_svc_sg_driver,
- m_get_lbaas_driver):
- handler = policy.NetworkPolicyHandler()
-
- m_get_project_driver.assert_called_once()
- m_get_policy_driver.assert_called_once()
- m_get_vif_driver.assert_called_once()
- m_get_pod_sg_driver.assert_called_once()
- m_get_svc_sg_driver.assert_called_once()
- m_get_lbaas_driver.assert_called_once()
-
- self.assertEqual(m_get_project_driver.return_value,
- handler._drv_project)
- self.assertEqual(m_get_policy_driver.return_value, handler._drv_policy)
-
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- def test_on_present(self, m_host_network, m_get_services):
- modified_pod = mock.sentinel.modified_pod
- match_pod = mock.sentinel.match_pod
- m_host_network.return_value = False
-
- knp_on_ns = self._handler._drv_policy.knps_on_namespace
- knp_on_ns.return_value = True
- namespaced_pods = self._handler._drv_policy.namespaced_pods
- ensure_nw_policy = self._handler._drv_policy.ensure_network_policy
- ensure_nw_policy.return_value = [modified_pod]
- affected_pods = self._handler._drv_policy.affected_pods
- affected_pods.return_value = [match_pod]
- sg1 = [mock.sentinel.sg1]
- sg2 = [mock.sentinel.sg2]
- self._get_security_groups.side_effect = [sg1, sg2]
- m_get_services.return_value = {'items': []}
-
- policy.NetworkPolicyHandler.on_present(self._handler, self._policy)
- namespaced_pods.assert_not_called()
- ensure_nw_policy.assert_called_once_with(self._policy,
- self._project_id)
- affected_pods.assert_called_once_with(self._policy)
-
- calls = [mock.call(modified_pod, self._project_id),
- mock.call(match_pod, self._project_id)]
- self._get_security_groups.assert_has_calls(calls)
-
- calls = [mock.call(modified_pod, sg1), mock.call(match_pod, sg2)]
- self._update_vif_sgs.assert_has_calls(calls)
- self._update_lbaas_sg.assert_not_called()
-
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- def test_on_present_without_knps_on_namespace(self, m_host_network,
- m_get_services):
- modified_pod = mock.sentinel.modified_pod
- match_pod = mock.sentinel.match_pod
- m_host_network.return_value = False
-
- ensure_nw_policy = self._handler._drv_policy.ensure_network_policy
- ensure_nw_policy.return_value = [modified_pod]
- affected_pods = self._handler._drv_policy.affected_pods
- affected_pods.return_value = [match_pod]
- sg2 = [mock.sentinel.sg2]
- sg3 = [mock.sentinel.sg3]
- self._get_security_groups.side_effect = [sg2, sg3]
- m_get_services.return_value = {'items': []}
-
- policy.NetworkPolicyHandler.on_present(self._handler, self._policy)
- ensure_nw_policy.assert_called_once_with(self._policy,
- self._project_id)
- affected_pods.assert_called_once_with(self._policy)
-
- calls = [mock.call(modified_pod, self._project_id),
- mock.call(match_pod, self._project_id)]
- self._get_security_groups.assert_has_calls(calls)
-
- calls = [mock.call(modified_pod, sg2),
- mock.call(match_pod, sg3)]
- self._update_vif_sgs.assert_has_calls(calls)
- self._update_lbaas_sg.assert_not_called()
+ self.k8s = mock.Mock()
+ m_get_k8s.return_value = self.k8s
+ self.m_get_k8s = m_get_k8s
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- def test_on_present_with_services(self, m_host_network, m_get_services):
- modified_pod = mock.sentinel.modified_pod
- match_pod = mock.sentinel.match_pod
- m_host_network.return_value = False
+ self.np_driver = mock.Mock()
+ m_get_np.return_value = self.np_driver
+ self._m_get_np = m_get_np
- self._handler._is_egress_only_policy.return_value = False
- self._handler._is_service_affected.return_value = True
- knp_on_ns = self._handler._drv_policy.knps_on_namespace
- knp_on_ns.return_value = True
- namespaced_pods = self._handler._drv_policy.namespaced_pods
- ensure_nw_policy = self._handler._drv_policy.ensure_network_policy
- ensure_nw_policy.return_value = [modified_pod]
- affected_pods = self._handler._drv_policy.affected_pods
- affected_pods.return_value = [match_pod]
- sg1 = [mock.sentinel.sg1]
- sg2 = [mock.sentinel.sg2]
- self._get_security_groups.side_effect = [sg1, sg2]
- service = {'metadata': {'name': 'service-test'},
- 'spec': {'selector': mock.sentinel.selector}}
- m_get_services.return_value = {'items': [service]}
+ self.handler = policy.NetworkPolicyHandler()
- policy.NetworkPolicyHandler.on_present(self._handler, self._policy)
- namespaced_pods.assert_not_called()
- ensure_nw_policy.assert_called_once_with(self._policy,
- self._project_id)
- affected_pods.assert_called_once_with(self._policy)
+ def test_init(self):
+ self.m_get_k8s.assert_called_once()
+ self._m_get_np.assert_called_once()
- calls = [mock.call(modified_pod, self._project_id),
- mock.call(match_pod, self._project_id)]
- self._get_security_groups.assert_has_calls(calls)
- calls = [mock.call(modified_pod, sg1), mock.call(match_pod, sg2)]
- self._update_vif_sgs.assert_has_calls(calls)
- self._handler._is_service_affected.assert_called_once_with(
- service, [modified_pod, match_pod])
- self._update_lbaas_sg.assert_called_once()
+ self.assertEqual(self.np_driver, self.handler._drv_policy)
+ self.assertEqual(self.k8s, self.handler.k8s)
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- def test_on_deleted(self, m_host_network, m_get_services):
- namespace_pod = mock.sentinel.namespace_pod
- match_pod = mock.sentinel.match_pod
- m_host_network.return_value = False
- affected_pods = self._handler._drv_policy.affected_pods
- affected_pods.return_value = [match_pod]
- get_knp_crd = self._handler._drv_policy.get_kuryrnetpolicy_crd
- knp_obj = self._get_knp_obj()
- get_knp_crd.return_value = knp_obj
- sg1 = [mock.sentinel.sg1]
- sg2 = [mock.sentinel.sg2]
- self._get_security_groups.side_effect = [sg1, sg2]
- m_get_services.return_value = {'items': []}
- release_nw_policy = self._handler._drv_policy.release_network_policy
- knp_on_ns = self._handler._drv_policy.knps_on_namespace
- knp_on_ns.return_value = False
- ns_pods = self._handler._drv_policy.namespaced_pods
- ns_pods.return_value = [namespace_pod]
+ def test_on_finalize(self):
+ self.handler.on_finalize(self._policy)
+ self.np_driver.release_network_policy.assert_called_once_with(
+ self._policy)
- policy.NetworkPolicyHandler.on_deleted(self._handler, self._policy)
- release_nw_policy.assert_called_once_with(knp_obj)
- self._get_security_groups.assert_called_once_with(match_pod,
- self._project_id)
- self._update_vif_sgs.assert_called_once_with(match_pod, sg1)
- self._update_lbaas_sg.assert_not_called()
- self._remove_sg.assert_called_once()
+ def test_on_present(self):
+ self.handler.on_present(self._policy)
+ self.k8s.add_finalizer.assert_called_once_with(
+ self._policy, 'kuryr.openstack.org/networkpolicy-finalizer')
+ self.np_driver.ensure_network_policy.assert_called_once_with(
+ self._policy)
diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py
index 68e66163b..8ea59ed9b 100644
--- a/kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py
+++ b/kuryr_kubernetes/tests/unit/controller/handlers/test_vif.py
@@ -16,6 +16,7 @@
from unittest import mock
from os_vif import objects as os_obj
+from oslo_serialization import jsonutils
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes.controller.drivers import base as drivers
@@ -43,15 +44,30 @@ def setUp(self):
self._pod_version = mock.sentinel.pod_version
self._pod_link = mock.sentinel.pod_link
self._pod_namespace = mock.sentinel.namespace
+ self._pod_uid = mock.sentinel.pod_uid
+ self._pod_name = 'pod1'
self._pod = {
'metadata': {'resourceVersion': self._pod_version,
'selfLink': self._pod_link,
+ 'name': self._pod_name,
'namespace': self._pod_namespace},
'status': {'phase': k_const.K8S_POD_STATUS_PENDING},
'spec': {'hostNetwork': False,
'nodeName': 'hostname'}
}
+ self._kp_version = mock.sentinel.kp_version
+ self._kp_link = mock.sentinel.kp_link
+ self._kp = {'apiVersion': 'openstack.org/v1',
+ 'kind': 'KuryrPort',
+ 'metadata': {'resourceVersion': self._kp_version,
+ 'selfLink': self._kp_link,
+ 'namespace': self._pod_namespace,
+ 'labels': mock.ANY},
+ 'spec': {'podUid': self._pod_uid,
+ 'podNodeName': 'hostname',
+ 'vifs': {}}}
+
self._handler = mock.MagicMock(spec=h_vif.VIFHandler)
self._handler._drv_project = mock.Mock(spec=drivers.PodProjectDriver)
self._handler._drv_subnets = mock.Mock(spec=drivers.PodSubnetsDriver)
@@ -68,7 +84,7 @@ def setUp(self):
self._request_vif = self._handler._drv_vif_pool.request_vif
self._release_vif = self._handler._drv_vif_pool.release_vif
self._activate_vif = self._handler._drv_vif_pool.activate_vif
- self._set_pod_state = self._handler._set_pod_state
+ self._matc = self._handler._move_annotations_to_crd
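+ # _matc: shorthand for _move_annotations_to_crd, used throughout the
+ # on_present tests below.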
self._is_pod_scheduled = self._handler._is_pod_scheduled
self._is_pod_completed = self._handler._is_pod_completed
self._request_additional_vifs = \
@@ -152,224 +168,183 @@ def test_is_pod_completed_failed(self):
self.assertTrue(h_vif.VIFHandler._is_pod_completed({'status': {'phase':
k_const.K8S_POD_STATUS_FAILED}}))
- @mock.patch('oslo_config.cfg.CONF')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'update_port_pci_info')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
- def test_on_present(self, m_get_pod_state, m_host_network, m_update_pci,
- m_conf):
- m_get_pod_state.return_value = self._state
- m_host_network.return_value = False
- self._vif.plugin = 'sriov'
- m_conf.sriov.enable_node_annotations = True
- h_vif.VIFHandler.on_present(self._handler, self._pod)
-
- m_get_pod_state.assert_called_once_with(self._pod)
- m_update_pci.assert_called_once_with(self._pod, self._vif)
- self._request_vif.assert_not_called()
- self._request_additional_vifs.assert_not_called()
- self._activate_vif.assert_not_called()
- self._set_pod_state.assert_not_called()
-
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
- def test_on_present_host_network(self, m_get_pod_state, m_host_network):
- m_get_pod_state.return_value = self._state
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
+ def test_on_present_host_network(self, m_get_kuryrport, m_host_network):
+ m_get_kuryrport.return_value = self._kp
m_host_network.return_value = True
+ self._matc.return_value = False
h_vif.VIFHandler.on_present(self._handler, self._pod)
- m_get_pod_state.assert_called_once()
+ self._matc.assert_called_once_with(self._pod)
+ m_get_kuryrport.assert_called_once()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
- self._set_pod_state.assert_not_called()
@mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
- def test_on_present_not_pending(self, m_get_pod_state, m_host_network):
- m_get_pod_state.return_value = self._state
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
+ def test_on_present_not_pending(self, m_get_kuryrport, m_host_network):
+ m_get_kuryrport.return_value = self._kp
m_host_network.return_value = False
self._is_pod_scheduled.return_value = False
+ self._matc.return_value = False
h_vif.VIFHandler.on_present(self._handler, self._pod)
- m_get_pod_state.assert_called_once()
+ self._matc.assert_called_once_with(self._pod)
+ m_get_kuryrport.assert_called_once()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
- self._set_pod_state.assert_not_called()
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
- def test_on_present_on_completed_with_annotation(self, m_get_pod_state):
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
+ def test_on_present_on_completed_with_annotation(self, m_get_kuryrport):
self._is_pod_completed.return_value = True
- m_get_pod_state.return_value = self._state
+ m_get_kuryrport.return_value = self._kp
+ self._matc.return_value = False
h_vif.VIFHandler.on_present(self._handler, self._pod)
- self._handler.on_deleted.assert_called_once_with(self._pod)
- self._set_pod_state.assert_called_once_with(self._pod, None)
+ self._matc.assert_called_once_with(self._pod)
+ self._handler.on_finalize.assert_called_once_with(self._pod)
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
- def test_on_present_on_completed_without_annotation(self, m_get_pod_state):
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
+ def test_on_present_on_completed_without_annotation(self, m_get_kuryrport):
self._is_pod_completed.return_value = True
- m_get_pod_state.return_value = None
+ m_get_kuryrport.return_value = None
+ self._matc.return_value = False
h_vif.VIFHandler.on_present(self._handler, self._pod)
- self._handler.on_deleted.assert_not_called()
-
- self._set_pod_state.assert_not_called()
+ self._matc.assert_called_once_with(self._pod)
+ self._handler.on_finalize.assert_not_called()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
- @mock.patch('oslo_config.cfg.CONF')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.'
- 'update_port_pci_info')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
- def test_on_present_activate(self, m_get_pod_state, m_host_network,
- m_get_services, m_update_pci, m_conf):
- m_get_pod_state.return_value = self._state
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
+ def test_on_present_create(self, m_get_kuryrport, m_host_network,
+ m_get_k8s_client):
+ m_get_kuryrport.return_value = None
m_host_network.return_value = False
- m_get_services.return_value = {"items": []}
- self._vif.active = False
- self._vif.plugin = 'sriov'
- m_conf.sriov.enable_node_annotations = True
+ self._matc.return_value = False
+ k8s = mock.MagicMock()
+ m_get_k8s_client.return_value = k8s
h_vif.VIFHandler.on_present(self._handler, self._pod)
- m_get_pod_state.assert_called_once_with(self._pod)
- m_update_pci.assert_called_once_with(self._pod, self._vif)
- self._activate_vif.assert_called_once_with(self._vif)
- self._set_pod_state.assert_called_once_with(self._pod, self._state)
- self._request_vif.assert_not_called()
- self._request_additional_vifs.assert_not_called()
-
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
- def test_on_present_create(self, m_get_pod_state, m_host_network):
- m_get_pod_state.return_value = None
- m_host_network.return_value = False
-
- h_vif.VIFHandler.on_present(self._handler, self._pod)
-
- m_get_pod_state.assert_called_once_with(self._pod)
- self._request_vif.assert_called_once_with(
- self._pod, self._project_id, self._subnets, self._security_groups)
- self._request_additional_vifs.assert_called_once_with(
- self._pod, self._project_id, self._security_groups)
- self._set_pod_state.assert_called_once_with(self._pod, self._state)
- self._activate_vif.assert_not_called()
+ m_get_kuryrport.assert_called_once_with(self._pod)
+ self._matc.assert_called_once_with(self._pod)
+ self._handler._add_kuryrport_crd.assert_called_once_with(self._pod)
+ k8s.add_finalizer.assert_called_once_with(self._pod,
+ k_const.POD_FINALIZER)
- @mock.patch('oslo_config.cfg.CONF')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
- def test_on_present_create_with_additional_vifs(self, m_get_pod_state,
- m_host_network, m_conf):
- m_get_pod_state.return_value = None
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
+ def test_on_present_update(self, m_get_kuryrport, m_host_network):
+ m_get_kuryrport.return_value = self._kp
m_host_network.return_value = False
- ifname_prefix = 'baz'
- m_conf.kubernetes.additional_ifname_prefix = ifname_prefix
- additional_vif = os_obj.vif.VIFBase()
- self._state.additional_vifs = {ifname_prefix+'1': additional_vif}
- self._request_additional_vifs.return_value = [additional_vif]
+ self._matc.return_value = False
h_vif.VIFHandler.on_present(self._handler, self._pod)
- m_get_pod_state.assert_called_once_with(self._pod)
- self._request_vif.assert_called_once_with(
- self._pod, self._project_id, self._subnets, self._security_groups)
- self._request_additional_vifs.assert_called_once_with(
- self._pod, self._project_id, self._security_groups)
- self._set_pod_state.assert_called_once_with(self._pod, self._state)
- self._activate_vif.assert_not_called()
+ self._matc.assert_called_once_with(self._pod)
+ m_get_kuryrport.assert_called_once_with(self._pod)
+ self._handler._add_kuryrport_crd.assert_not_called()
@mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
- def test_on_present_rollback(self, m_get_pod_state, m_host_network):
- m_get_pod_state.return_value = None
- m_host_network.return_value = False
- self._set_pod_state.side_effect = k_exc.K8sClientException
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
+ def test_on_present_upgrade(self, m_get_kuryrport, m_host_network):
+ m_get_kuryrport.return_value = self._kp
+ m_host_network.return_value = True
+ self._matc.return_value = True
h_vif.VIFHandler.on_present(self._handler, self._pod)
- m_get_pod_state.assert_called_once_with(self._pod)
- self._request_vif.assert_called_once_with(
- self._pod, self._project_id, self._subnets, self._security_groups)
- self._request_additional_vifs.assert_called_once_with(
- self._pod, self._project_id, self._security_groups)
- self._set_pod_state.assert_called_once_with(self._pod, self._state)
- self._release_vif.assert_called_once_with(self._pod, self._vif,
- self._project_id,
- self._security_groups)
+ self._matc.assert_called_once_with(self._pod)
+ m_get_kuryrport.assert_not_called()
+ self._request_vif.assert_not_called()
+ self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
- def test_on_deleted(self, m_get_pod_state, m_host_network, m_get_services):
- m_get_pod_state.return_value = self._state
- m_host_network.return_value = False
- m_get_services.return_value = {"items": []}
- h_vif.VIFHandler.on_deleted(self._handler, self._pod)
-
- m_get_pod_state.assert_called_once_with(self._pod)
- self._release_vif.assert_called_once_with(self._pod, self._vif,
- self._project_id,
- self._security_groups)
-
- @mock.patch('oslo_config.cfg.CONF')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
- def test_on_deleted_with_additional_vifs(self, m_get_pod_state,
- m_host_network, m_get_services,
- m_conf):
- additional_vif = os_obj.vif.VIFBase()
- ifname_prefix = 'bar'
- m_conf.kubernetes.additional_ifname_prefix = ifname_prefix
- self._state.additional_vifs = {ifname_prefix+'1': additional_vif}
- m_get_pod_state.return_value = self._state
- m_host_network.return_value = False
- m_get_services.return_value = {"items": []}
-
- h_vif.VIFHandler.on_deleted(self._handler, self._pod)
-
- self._release_vif.assert_any_call(self._pod, self._vif,
- self._project_id,
- self._security_groups)
- self._release_vif.assert_any_call(self._pod, additional_vif,
- self._project_id,
- self._security_groups)
-
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
- def test_on_deleted_host_network(self, m_get_pod_state, m_host_network):
- m_get_pod_state.return_value = self._state
- m_host_network.return_value = True
-
- h_vif.VIFHandler.on_deleted(self._handler, self._pod)
-
- m_get_pod_state.assert_not_called()
- self._release_vif.assert_not_called()
-
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
- @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
- def test_on_deleted_no_annotation(self, m_get_pod_state, m_host_network,
- m_get_services):
- m_get_pod_state.return_value = None
- m_host_network.return_value = False
- m_get_services.return_value = {"items": []}
-
- h_vif.VIFHandler.on_deleted(self._handler, self._pod)
-
- m_get_pod_state.assert_called_once_with(self._pod)
- self._release_vif.assert_not_called()
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
+ def test_on_finalize_crd(self, m_get_kuryrport, m_get_k8s_client):
+ m_get_kuryrport.return_value = self._kp
+ k8s = mock.MagicMock()
+ m_get_k8s_client.return_value = k8s
+
+ h_vif.VIFHandler.on_finalize(self._handler, self._pod)
+
+ k8s.delete.assert_called_once_with(
+ h_vif.KURYRPORT_URI.format(
+ ns=self._pod["metadata"]["namespace"],
+ crd=self._pod["metadata"]["name"]))
+
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
+ def test_on_finalize_crd_exception(self, m_get_kuryrport,
+ m_get_k8s_client):
+ m_get_kuryrport.return_value = self._kp
+ k8s = mock.MagicMock()
+ m_get_k8s_client.return_value = k8s
+ k8s.delete.side_effect = k_exc.K8sClientException
+
+ self.assertRaises(k_exc.ResourceNotReady,
+ h_vif.VIFHandler.on_finalize,
+ self._handler, self._pod)
+
+ k8s.delete.assert_called_once_with(
+ h_vif.KURYRPORT_URI.format(
+ ns=self._pod["metadata"]["namespace"],
+ crd=self._pod["metadata"]["name"]))
+
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
+ def test_on_finalize_crd_not_found(self, m_get_kuryrport,
+ m_get_k8s_client):
+ m_get_kuryrport.return_value = self._kp
+ k8s = mock.MagicMock()
+ m_get_k8s_client.return_value = k8s
+ k8s.delete.side_effect = k_exc.K8sResourceNotFound(self._pod)
+
+ h_vif.VIFHandler.on_finalize(self._handler, self._pod)
+
+ k8s.delete.assert_called_once_with(
+ h_vif.KURYRPORT_URI.format(
+ ns=self._pod["metadata"]["namespace"],
+ crd=self._pod["metadata"]["name"]))
+ k8s.remove_finalizer.assert_called_once_with(self._pod,
+ k_const.POD_FINALIZER)
+
+ def test_move_annotations_to_crd_no_annotations(self):
+ res = h_vif.VIFHandler._move_annotations_to_crd(self._handler,
+ self._pod)
+ self.assertFalse(res)
+
+ @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
+ def test_move_annotations_to_crd_with_annotations(self, m_get_k8s_client):
+ vifobj = os_obj.vif.VIFOpenVSwitch()
+ state = vif.PodState(default_vif=vifobj)
+ annotation = jsonutils.dumps(state.obj_to_primitive())
+ self._pod['metadata']['annotations'] = {
+ k_const.K8S_ANNOTATION_VIF: annotation}
+ vifs = {'eth0': {'default': True, 'vif': vifobj.obj_to_primitive()}}
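+ # The annotation above is expected to be translated into this KuryrPort
+ # 'vifs' structure when handed to _add_kuryrport_crd().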
+ k8s = mock.MagicMock()
+ m_get_k8s_client.return_value = k8s
+
+ res = h_vif.VIFHandler._move_annotations_to_crd(self._handler,
+ self._pod)
+ self.assertTrue(res)
+ self._handler._add_kuryrport_crd.assert_called_once_with(self._pod,
+ vifs)
+
+ m_get_k8s_client.assert_called_once()
+ k8s.remove_annotations.assert_called_once_with(
+ self._pod['metadata']['selfLink'], k_const.K8S_ANNOTATION_VIF)
diff --git a/kuryr_kubernetes/tests/unit/test_utils.py b/kuryr_kubernetes/tests/unit/test_utils.py
index 6044a1051..27a9fad24 100644
--- a/kuryr_kubernetes/tests/unit/test_utils.py
+++ b/kuryr_kubernetes/tests/unit/test_utils.py
@@ -20,7 +20,6 @@
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes import exceptions as k_exc
-from kuryr_kubernetes.objects import lbaas as obj_lbaas
from kuryr_kubernetes.objects import vif
from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix
@@ -185,40 +184,120 @@ def test_get_service_ports(self):
@mock.patch('kuryr_kubernetes.utils.get_service_ports')
def test_has_port_changes(self, m_get_service_ports):
- service = mock.MagicMock()
- m_get_service_ports.return_value = [
- {'port': 1, 'name': 'X', 'protocol': 'TCP', 'targetPort': 1},
- ]
-
- lbaas_spec = mock.MagicMock()
- lbaas_spec.ports = [
- obj_lbaas.LBaaSPortSpec(name='X', protocol='TCP', port=1,
- targetPort=1),
- obj_lbaas.LBaaSPortSpec(name='Y', protocol='TCP', port=2,
- targetPort=2),
- ]
-
- ret = utils.has_port_changes(service, lbaas_spec)
+ service = {
+ 'metadata': {
+ 'selfLink': ""
+ },
+ 'spec': {
+ 'ports': [
+ {
+ 'port': 1,
+ 'name': 'X',
+ 'protocol': 'TCP',
+ 'targetPort': '1'
+ }
+ ]
+ }
+ }
+ lb_crd_spec = {
+ 'spec': {
+ 'ports': [
+ {
+ 'name': 'Y',
+ 'protocol': 'TCP',
+ 'port': 2,
+ 'targetPort': 2
+ }
+ ]
+ }
+ }
+ ret = utils.has_port_changes(service, lb_crd_spec)
self.assertTrue(ret)
@mock.patch('kuryr_kubernetes.utils.get_service_ports')
- def test_has_port_changes__no_changes(self, m_get_service_ports):
- service = mock.MagicMock()
- m_get_service_ports.return_value = [
- {'port': 1, 'name': 'X', 'protocol': 'TCP', 'targetPort': '1'},
- {'port': 2, 'name': 'Y', 'protocol': 'TCP', 'targetPort': '2'}
- ]
-
- lbaas_spec = mock.MagicMock()
- lbaas_spec.ports = [
- obj_lbaas.LBaaSPortSpec(name='X', protocol='TCP', port=1,
- targetPort=1),
- obj_lbaas.LBaaSPortSpec(name='Y', protocol='TCP', port=2,
- targetPort=2),
- ]
-
- ret = utils.has_port_changes(service, lbaas_spec)
+ def test_has_port_changes_more_ports(self, m_get_service_ports):
+ service = {
+ 'metadata': {
+ 'selfLink': ""
+ },
+ 'spec': {
+ 'ports': [
+ {
+ 'port': 1,
+ 'name': 'X',
+ 'protocol': 'TCP',
+ 'targetPort': '1'
+ }
+ ]
+ }
+ }
+ lb_crd_spec = {
+ 'spec': {
+ 'ports': [
+ {
+ 'name': 'X',
+ 'protocol': 'TCP',
+ 'port': 1,
+ 'targetPort': 1
+ },
+ {
+ 'name': 'Y',
+ 'protocol': 'TCP',
+ 'port': 2,
+ 'targetPort': 2
+ }
+ ]
+ }
+ }
+
+ ret = utils.has_port_changes(service, lb_crd_spec)
+ self.assertTrue(ret)
+
+ @mock.patch('kuryr_kubernetes.utils.get_service_ports')
+ def test_has_port_changes_no_changes(self, m_get_service_ports):
+
+ service = {
+ 'metadata': {
+ 'selfLink': ""
+ },
+ 'spec': {
+ 'ports': [
+ {
+ 'port': 1,
+ 'name': 'X',
+ 'protocol': 'TCP',
+ 'targetPort': '1'
+ },
+ {
+ 'name': 'Y',
+ 'protocol': 'TCP',
+ 'port': 2,
+ 'targetPort': '2'
+ }
+ ]
+ }
+ }
+
+ lb_crd_spec = {
+ 'spec': {
+ 'ports': [
+ {
+ 'name': 'X',
+ 'protocol': 'TCP',
+ 'port': 1,
+ 'targetPort': '1'
+ },
+ {
+ 'name': 'Y',
+ 'protocol': 'TCP',
+ 'port': 2,
+ 'targetPort': '2'
+ }
+ ]
+ }
+ }
+ ret = utils.has_port_changes(service, lb_crd_spec)
self.assertFalse(ret)
def test_get_nodes_ips(self):
diff --git a/kuryr_kubernetes/utils.py b/kuryr_kubernetes/utils.py
index afc975a22..52f1d3dd8 100644
--- a/kuryr_kubernetes/utils.py
+++ b/kuryr_kubernetes/utils.py
@@ -97,15 +97,15 @@ def convert_netns(netns):
return netns
-def get_pod_unique_name(pod):
- """Returns a unique name for the pod.
+def get_res_unique_name(resource):
+ """Returns a unique name for the resource like pod or CRD.
- It returns a pod unique name for the pod composed of its name and the
+ It returns a unique name for the resource composed of its name and the
namespace it is running on.
- :returns: String with namespace/name of the pod
+ :returns: String with namespace/name of the resource
"""
- return "%(namespace)s/%(name)s" % pod['metadata']
+ return "%(namespace)s/%(name)s" % resource['metadata']
def check_suitable_multi_pool_driver_opt(pool_driver, pod_driver):
@@ -252,6 +252,15 @@ def extract_pod_annotation(annotation):
return obj
+def get_vifs_from_crd(crd):
+ # Rebuild the os_vif objects from the serialized (primitive) form stored
+ # in the KuryrPort CRD spec.
+ result = {}
+ for ifname, data in crd['spec']['vifs'].items():
+ result[ifname] = objects.base.VersionedObject.obj_from_primitive(
+ data['vif'])
+ return result
+
+
def has_limit(quota):
NO_LIMIT = -1
return quota['limit'] != NO_LIMIT
@@ -370,24 +379,27 @@ def get_endpoints_link(service):
return "/".join(link_parts)
-def has_port_changes(service, lbaas_spec):
+def has_port_changes(service, loadbalancer_crd):
+ if not loadbalancer_crd:
+ return False
link = service['metadata']['selfLink']
-
- fields = obj_lbaas.LBaaSPortSpec.fields
- svc_port_set = {tuple(port[attr] for attr in fields)
- for port in get_service_ports(service)}
-
- spec_port_set = {tuple(getattr(port, attr)
- for attr in fields
- if port.obj_attr_is_set(attr))
- for port in lbaas_spec.ports}
-
- if svc_port_set != spec_port_set:
- LOG.debug("LBaaS spec ports %(spec_ports)s != %(svc_ports)s "
- "for %(link)s" % {'spec_ports': spec_port_set,
- 'svc_ports': svc_port_set,
- 'link': link})
- return svc_port_set != spec_port_set
+ svc_port_set = service['spec'].get('ports')
+
+ # targetPort may come through as an int or a str depending on how the
+ # Service was written, so normalize it to str before comparing.
+ for port in svc_port_set:
+ port['targetPort'] = str(port['targetPort'])
+ spec_port_set = loadbalancer_crd['spec'].get('ports', [])
+ if spec_port_set:
+ if len(svc_port_set) != len(spec_port_set):
+ return True
+ diff = any(x != y for x, y in zip(svc_port_set, spec_port_set))
+ if diff:
+ LOG.debug("LBaaS spec ports %(spec_ports)s != %(svc_ports)s "
+ "for %(link)s", {'spec_ports': spec_port_set,
+ 'svc_ports': svc_port_set,
+ 'link': link})
+ return diff
+ return False
def get_service_ports(service):
diff --git a/setup.cfg b/setup.cfg
index 9f0ab915c..bc29f44b7 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -100,15 +100,17 @@ kuryr_kubernetes.controller.drivers.vif_pool =
kuryr_kubernetes.controller.handlers =
vif = kuryr_kubernetes.controller.handlers.vif:VIFHandler
- lbaasspec = kuryr_kubernetes.controller.handlers.lbaas:LBaaSSpecHandler
- lb = kuryr_kubernetes.controller.handlers.lbaas:LoadBalancerHandler
+ service = kuryr_kubernetes.controller.handlers.lbaas:ServiceHandler
+ endpoints = kuryr_kubernetes.controller.handlers.lbaas:EndpointsHandler
+ kuryrloadbalancer = kuryr_kubernetes.controller.handlers.loadbalancer:KuryrLoadBalancerHandler
namespace = kuryr_kubernetes.controller.handlers.namespace:NamespaceHandler
policy = kuryr_kubernetes.controller.handlers.policy:NetworkPolicyHandler
pod_label = kuryr_kubernetes.controller.handlers.pod_label:PodLabelHandler
- kuryrnetpolicy = kuryr_kubernetes.controller.handlers.kuryrnetpolicy:KuryrNetPolicyHandler
+ kuryrnetworkpolicy = kuryr_kubernetes.controller.handlers.kuryrnetworkpolicy:KuryrNetworkPolicyHandler
kuryrnetwork = kuryr_kubernetes.controller.handlers.kuryrnetwork:KuryrNetworkHandler
kuryrnetwork_population = kuryr_kubernetes.controller.handlers.kuryrnetwork_population:KuryrNetworkPopulationHandler
test_handler = kuryr_kubernetes.tests.unit.controller.handlers.test_fake_handler:TestHandler
+ kuryrport = kuryr_kubernetes.controller.handlers.kuryrport:KuryrPortHandler
kuryr_kubernetes.controller.drivers.multi_vif =
noop = kuryr_kubernetes.controller.drivers.multi_vif:NoopMultiVIFDriver
diff --git a/tools/gate/copy_k8s_logs.sh b/tools/gate/copy_k8s_logs.sh
index 3f9ac9830..c946f60bb 100755
--- a/tools/gate/copy_k8s_logs.sh
+++ b/tools/gate/copy_k8s_logs.sh
@@ -37,6 +37,8 @@ sudo chown ${USER}:${USER} ${HOME}/.kube/config
/usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get kuryrnetworks -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnetworks_crds.txt
/usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get endpoints -o yaml --all-namespaces >> ${K8S_LOG_DIR}/endpoints.txt
/usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get kuryrnetpolicy -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnetpolicy_crds.txt
+/usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get kuryrport -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrport_crds.txt
+/usr/local/bin/kubectl --kubeconfig=${HOME}/.kube/config get kuryrnetworkpolicy -o yaml --all-namespaces >> ${K8S_LOG_DIR}/kuryrnetworkpolicy_crds.txt
# Kubernetes pods logs
mkdir -p ${K8S_LOG_DIR}/pod_logs
while read -r line
diff --git a/tox.ini b/tox.ini
index 013702fe3..36f314df3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -35,10 +35,15 @@ commands = {posargs}
[testenv:cover]
basepython = python3
+setenv =
+ {[testenv]setenv}
+ PYTHON=coverage run --source kuryr_kubernetes --parallel-mode
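+# stestr starts its test workers through ${PYTHON} when it is set, so the
+# tests run under coverage; "coverage combine" below merges the per-worker
+# data files produced by --parallel-mode.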
commands =
- rm -f .testrepository/times.dbm
- python setup.py test --coverage --testr-args={posargs} \
- --coverage-package-name=kuryr_kubernetes
+ coverage erase
+ stestr run {posargs}
+ coverage combine
+ coverage html -d cover
+ coverage xml -o cover/coverage.xml
coverage report
[testenv:docs]