Skip to content

Commit

Permalink
Enable IPsec
Browse files Browse the repository at this point in the history
This patch introduces IPsec enablement for the Cluster Network
Operator using the OVN IPsec functionality.

A new Daemonset is created that hosts libreswan and ovs-monitor-ipsec
which watch for changes in OVN/OVS configuration and update the
Linux XFRM framework appropriately.

This patch also modifies the ovnkube-master Daemonset to enable IPsec across
the cluster. This is done by writing a configuration option to the NB DB.

The patch also modifies the ovs-node Daemonset to generate
a key pair on initialization, and requests that the key pair be
signed by the signer-ca.

IPsec should only be configurable at cluster installation time.

Signed-off-by: Mark Gray <mark.d.gray@redhat.com>
  • Loading branch information
markdgray committed Dec 3, 2020
1 parent 6983d5c commit f8a0217
Show file tree
Hide file tree
Showing 6 changed files with 301 additions and 0 deletions.
10 changes: 10 additions & 0 deletions README.md
Expand Up @@ -256,6 +256,16 @@ The hybridClusterNetwork `cidr` and hostPrefix are used when adding windows node

There can be at most one hybridClusterNetwork "CIDR". A future version may support multiple `cidr`.

#### Configuring IPsec with OVNKubernetes
OVNKubernetes supports IPsec encryption of all pod traffic using the OVN IPsec functionality. Add the following to the `spec:` section of the operator config:

```yaml
spec:
defaultNetwork:
type: OVNKubernetes
ovnKubernetesConfig:
ipsecConfig: {}
```

### Configuring Kuryr-Kubernetes
Kuryr-Kubernetes is a CNI plugin that uses OpenStack Neutron to network OpenShift Pods, and OpenStack Octavia to create load balancers for Services. In general it is useful when OpenShift is running on an OpenStack cluster, as you can use the same SDN (OpenStack Neutron) to provide networking for both the VMs OpenShift is running on, and the Pods created by OpenShift. In such a case, avoidance of double encapsulation gives you two advantages: improved performance (in terms of both latency and throughput) and lower complexity of the networking architecture.
Expand Down
244 changes: 244 additions & 0 deletions bindata/network/ovn-kubernetes-ipsec/ovn-ipsec.yaml
@@ -0,0 +1,244 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: ovn-ipsec
  namespace: openshift-ovn-kubernetes
  annotations:
    kubernetes.io/description: |
      This DaemonSet launches the ovn ipsec networking components for all nodes.
    release.openshift.io/version: "{{.ReleaseVersion}}"
spec:
  selector:
    matchLabels:
      app: ovn-ipsec
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: ovn-ipsec
        component: network
        type: infra
        openshift.io/component: network
        kubernetes.io/os: "linux"
    spec:
      serviceAccountName: ovn-kubernetes-node
      hostNetwork: true
      priorityClassName: "system-node-critical"
      initContainers:
      # Generates (or renews) this node's IPsec key pair, has the CSR signed
      # by the CNO signer controller, and points OVS at the resulting files.
      - name: ovn-keys
        image: "{{.OvnImage}}"
        command:
        - /bin/bash
        - -c
        - |
          #!/bin/bash
          set -exuo pipefail
          # Every time we restart this container, we will create a new key pair if
          # we are close to key expiration or if we do not already have a signed key pair.
          #
          # Each node has a key pair which is used by OVS to encrypt/decrypt/authenticate traffic
          # between each node. The CA cert is used as the root of trust for all certs so we need
          # the CA to sign our certificate signing requests with the CA private key. In this way,
          # we can validate that any signed certificates that we receive from other nodes are
          # authentic.
          echo "Configuring IPsec keys"
          # If the certificate does not exist or it will expire in the next 6 months
          # (15770000 seconds), we will generate a new one.
          # NOTE: the expiry check must negate the openssl command itself; wrapping
          # it in "[ ! openssl ... ]" would never execute openssl at all, and the
          # certificate file must be passed explicitly with -in.
          if [ ! -e /etc/openvswitch/keys/ipsec-cert.pem ] || \
             ! openssl x509 -noout -dates -checkend 15770000 -in /etc/openvswitch/keys/ipsec-cert.pem
          then
            # We use the system-id as the CN for our certificate signing request. This
            # is a requirement by OVN.
            cn=$(ovs-vsctl --retry -t 60 get Open_vSwitch . external-ids:system-id | tr -d "\"")
            mkdir -p /etc/openvswitch/keys
            # Generate an SSL private key and use the key to create a certificate signing request
            umask 077 && openssl genrsa -out /etc/openvswitch/keys/ipsec-privkey.pem 2048
            openssl req -new -text \
                        -extensions v3_req \
                        -addext "subjectAltName = DNS:${cn}" \
                        -subj "/C=US/O=ovnkubernetes/OU=kind/CN=${cn}" \
                        -key /etc/openvswitch/keys/ipsec-privkey.pem \
                        -out /etc/openvswitch/keys/ipsec-req.pem
            csr_64=$(cat /etc/openvswitch/keys/ipsec-req.pem | base64 | tr -d "\n")
            # The signer controller does not allow re-signing a key. We will
            # delete the old key to be sure it is not there
            kubectl delete --ignore-not-found=true csr/$(hostname)
            # Request that our generated certificate signing request is
            # signed by the "network.openshift.io/signer" signer that is
            # implemented by the CNO signer controller. This will sign the
            # certificate signing request using the signer-ca which has been
            # set up by the OperatorPKI. In this way, we have a signed certificate
            # and our private key has remained private on this host.
            cat <<EOF | kubectl apply -f -
          apiVersion: certificates.k8s.io/v1
          kind: CertificateSigningRequest
          metadata:
            name: $(hostname)
          spec:
            request: ${csr_64}
            signerName: network.openshift.io/signer
            usages:
            - ipsec tunnel
          EOF
            # Wait until the certificate signing request has been signed.
            # NOTE: under "set -e", ((counter++)) would abort the script on the
            # first iteration (post-increment of 0 yields exit status 1), so a
            # plain arithmetic assignment is used instead.
            counter=0
            until [ -n "$(kubectl get csr/$(hostname) -o jsonpath='{.status.certificate}' 2>/dev/null)" ]
            do
              counter=$((counter+1))
              sleep 1
              if [ $counter -gt 60 ];
              then
                echo "Unable to sign certificate after $counter seconds"
                exit 1
              fi
            done
            # Decode the signed certificate.
            kubectl get csr/$(hostname) -o jsonpath='{.status.certificate}' | base64 -d | openssl x509 -outform pem -text -out /etc/openvswitch/keys/ipsec-cert.pem
            kubectl delete csr/$(hostname)
            # Get the CA certificate so we can authenticate peer nodes.
            cat /signer-ca/ca-bundle.crt | openssl x509 -outform pem -text > /etc/openvswitch/keys/ipsec-cacert.pem
          fi
          # Configure OVS with the relevant keys for this node. This is required by ovs-monitor-ipsec.
          #
          # Updating the certificates does not need to be an atomic operation as
          # they will get read and loaded into NSS by the ovs-monitor-ipsec process
          # which has not started yet.
          ovs-vsctl --retry -t 60 set Open_vSwitch . other_config:certificate=/etc/openvswitch/keys/ipsec-cert.pem \
                                                     other_config:private_key=/etc/openvswitch/keys/ipsec-privkey.pem \
                                                     other_config:ca_cert=/etc/openvswitch/keys/ipsec-cacert.pem
        env:
        - name: K8S_NODE
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /var/run/openvswitch
          name: host-var-run-ovs
        - mountPath: /signer-ca
          name: signer-ca
        - mountPath: /etc/openvswitch
          name: etc-openvswitch
        terminationMessagePolicy: FallbackToLogsOnError
      terminationGracePeriodSeconds: 10
      containers:
      # ovs-monitor-ipsec and libreswan daemons
      - name: ovn-ipsec
        image: "{{.OvnImage}}"
        command:
        - /bin/bash
        - -c
        - |
          #!/bin/bash
          set -exuo pipefail
          function cleanup()
          {
            # In order to maintain traffic flows during container restart, we
            # need to ensure that xfrm state and policies are not flushed.
            # Don't allow ovs monitor to cleanup persistent state
            kill $(cat /var/run/openvswitch/ovs-monitor-ipsec.pid 2>/dev/null) 2>/dev/null || true
            # Don't allow pluto to clear xfrm state and policies on exit
            kill -9 $(cat /var/run/pluto/pluto.pid 2>/dev/null) 2>/dev/null || true
            /usr/sbin/ipsec --stopnflog
            exit 0
          }
          trap cleanup SIGTERM
          # Workaround for https://github.com/libreswan/libreswan/issues/373
          ulimit -n 1024
          # Make all ipsec state persistent across restarts.
          rm -rf /etc/ipsec.conf /etc/ipsec.d /etc/ipsec.secrets
          touch /etc/openvswitch/ipsec.conf
          touch /etc/openvswitch/ipsec.secrets
          mkdir -p /etc/openvswitch/ipsec.d
          ln -s /etc/openvswitch/ipsec.conf /etc/ipsec.conf
          ln -s /etc/openvswitch/ipsec.d /etc/ipsec.d
          ln -s /etc/openvswitch/ipsec.secrets /etc/ipsec.secrets
          /usr/libexec/ipsec/addconn --config /etc/openvswitch/ipsec.conf --checkconfig
          # Check kernel modules
          /usr/libexec/ipsec/_stackmanager start
          # Check nss database status
          /usr/sbin/ipsec --checknss
          # Check nflog setup
          /usr/sbin/ipsec --checknflog
          # Start the pluto IKE daemon and use the ipsec configuration file that
          # persists across container restarts. If the container has been restarted,
          # this will contain the previous configuration from ovs-monitor-ipsec
          /usr/libexec/ipsec/pluto --leak-detective --config /etc/openvswitch/ipsec.conf --logfile /var/log/openvswitch/libreswan.log
          # Environment variables are for workaround for https://mail.openvswitch.org/pipermail/ovs-dev/2020-October/375734.html
          # We now start ovs-monitor-ipsec which will monitor for changes in the ovs
          # tunnelling configuration (for example addition of a node) and configures
          # libreswan appropriately.
          OVS_LOGDIR=/var/log/openvswitch OVS_RUNDIR=/var/run/openvswitch OVS_PKGDATADIR=/usr/share/openvswitch /usr/share/openvswitch/scripts/ovs-ctl --ike-daemon=libreswan start-ovs-ipsec
          while true; do
            sleep 60
          done
        env:
        - name: OVS_LOG_LEVEL
          value: info
        - name: K8S_NODE
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /var/run/openvswitch
          name: host-var-run-ovs
        - mountPath: /var/log/openvswitch/
          name: host-var-log-ovs
        - mountPath: /etc/openvswitch
          name: etc-openvswitch
        terminationMessagePolicy: FallbackToLogsOnError
        readinessProbe:
          exec:
            command:
            - /bin/bash
            - -c
            - |
              #!/bin/bash
              ovs-appctl -t ovs-monitor-ipsec ipsec/status
          initialDelaySeconds: 120
          periodSeconds: 30
          timeoutSeconds: 40
      terminationGracePeriodSeconds: 10
      nodeSelector:
        beta.kubernetes.io/os: "linux"
      volumes:
      - name: host-var-log-ovs
        hostPath:
          path: /var/log/openvswitch
          type: DirectoryOrCreate
      - name: host-var-run-ovs
        hostPath:
          path: /var/run/openvswitch
          type: DirectoryOrCreate
      - name: signer-ca
        configMap:
          name: signer-ca
      - name: etc-openvswitch
        hostPath:
          path: /var/lib/openvswitch/etc
          type: DirectoryOrCreate
      tolerations:
      - operator: "Exists"
8 changes: 8 additions & 0 deletions bindata/network/ovn-kubernetes/002-rbac.yaml
Expand Up @@ -65,6 +65,14 @@ rules:
- apiGroups: ['authorization.k8s.io']
resources: ['subjectaccessreviews']
verbs: ['create']
# Allow the per-node IPsec init container to create a CSR, poll it for the
# signed certificate, and delete it afterwards. Quoting and inline-list style
# match the sibling rules above.
- apiGroups: ['certificates.k8s.io']
  resources: ['certificatesigningrequests']
  verbs: ['create', 'get', 'delete', 'update', 'list']

---
apiVersion: rbac.authorization.k8s.io/v1
Expand Down
10 changes: 10 additions & 0 deletions bindata/network/ovn-kubernetes/006-signer-pki.yaml
@@ -0,0 +1,10 @@
# Request that the cluster network operator PKI controller
# creates a certificate and key for signing requests.
# The resulting signer-ca is used by the CNO signer controller to sign the
# per-node CertificateSigningRequests created by the ovn-ipsec DaemonSet,
# and its CA bundle is mounted into that DaemonSet (via the signer-ca
# ConfigMap) so nodes can authenticate each other's certificates.
apiVersion: network.operator.openshift.io/v1
kind: OperatorPKI
metadata:
  name: signer
  namespace: openshift-ovn-kubernetes
spec:
  targetCert:
    # CN of the signing certificate issued by the PKI controller.
    commonName: ovn-kubernetes-signer
4 changes: 4 additions & 0 deletions bindata/network/ovn-kubernetes/ovnkube-master.yaml
Expand Up @@ -380,6 +380,10 @@ spec:
done
fi
fi
{{ if .EnableIPsec }}
${OVN_NB_CTL} set nb_global . ipsec=true
{{ end }}
preStop:
exec:
command:
Expand Down
25 changes: 25 additions & 0 deletions pkg/network/ovn_kubernetes.go
Expand Up @@ -108,7 +108,22 @@ func renderOVNKubernetes(conf *operv1.NetworkSpec, bootstrapResult *bootstrap.Bo
data.Data["OVNHybridOverlayVXLANPort"] = ""
}

if c.IPsecConfig != nil {
data.Data["EnableIPsec"] = true
// Only render ipsec manifest if ipsec has been enabled at cluster
// installation time. We will never have to delete the ipsec pod
// because it cannot be disabled at runtime
ipsecManifests, err := render.RenderDir(filepath.Join(manifestDir, "network/ovn-kubernetes-ipsec"), &data)
if err != nil {
return nil, errors.Wrap(err, "failed to render ipsec manifest")
}
objs = append(objs, ipsecManifests...)
} else {
data.Data["EnableIPsec"] = false
}

manifests, err := render.RenderDir(filepath.Join(manifestDir, "network/ovn-kubernetes"), &data)

if err != nil {
return nil, errors.Wrap(err, "failed to render manifests")
}
Expand Down Expand Up @@ -187,6 +202,14 @@ func isOVNKubernetesChangeSafe(prev, next *operv1.NetworkSpec) []error {
errs = append(errs, errors.Errorf("cannot edit a running hybrid overlay network"))
}
}
if pn.IPsecConfig == nil && nn.IPsecConfig != nil {
errs = append(errs, errors.Errorf("cannot enable IPsec after install time"))
}
if pn.IPsecConfig != nil {
if !reflect.DeepEqual(pn.IPsecConfig, nn.IPsecConfig) {
errs = append(errs, errors.Errorf("cannot edit IPsec configuration at runtime"))
}
}

return errs
}
Expand All @@ -202,6 +225,8 @@ func fillOVNKubernetesDefaults(conf, previous *operv1.NetworkSpec, hostMTU int)
// If MTU is not supplied, we infer it from the host on which CNO is running
// (which may not be a node in the cluster).
// However, this can never change, so we always prefer previous.

	// TODO - Need to check whether this default should account for the additional IPsec headers
if sc.MTU == nil {
var mtu uint32 = uint32(hostMTU) - 100 // 100 byte geneve header
if previous != nil && previous.DefaultNetwork.OVNKubernetesConfig != nil &&
Expand Down

0 comments on commit f8a0217

Please sign in to comment.