Bug 1906732: Add E2E to ensure Cluster Wide Proxies are honoured in Machine API components #192

Merged
16 changes: 16 additions & 0 deletions docs/cluster-wide-proxy.md
@@ -0,0 +1,16 @@
# Overview

This test suite confirms the behavior of the machine-api-controller both when a [cluster-wide proxy](https://docs.openshift.com/container-platform/4.6/networking/enable-cluster-wide-proxy.html) is configured and when it is not.

To allow the cluster-wide proxy to be configured, a proxy is deployed as a daemonset in the cluster and is
accessed by nodes and pods via the service network. This approach was chosen because of variances in security
configuration between cloud providers: originally, this test deployed a separate standalone node that hosted a
proxy on the host network, which worked for every cloud provider except AWS.
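
As an illustrative sketch only (the service name, namespace, port, and trusted CA config map below are placeholders, not taken from this repository), a test could point the cluster-wide proxy at the in-cluster proxy service like this:

```go
package main

import (
	"context"
	"log"

	configv1 "github.com/openshift/api/config/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	scheme := runtime.NewScheme()
	if err := configv1.AddToScheme(scheme); err != nil {
		log.Fatal(err)
	}
	c, err := client.New(config.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		log.Fatal(err)
	}

	// Fetch the singleton cluster-wide Proxy object and point it at the
	// in-cluster proxy service. The service URL and the trusted CA config
	// map name are placeholders for illustration only.
	proxy := &configv1.Proxy{}
	if err := c.Get(context.TODO(), types.NamespacedName{Name: "cluster"}, proxy); err != nil {
		log.Fatal(err)
	}
	proxy.Spec.HTTPProxy = "http://mitm-proxy.openshift-machine-api.svc.cluster.local:8080"
	proxy.Spec.HTTPSProxy = "https://mitm-proxy.openshift-machine-api.svc.cluster.local:8080"
	proxy.Spec.TrustedCA = configv1.ConfigMapNameReference{Name: "mitm-custom-pki"}
	if err := c.Update(context.TODO(), proxy); err != nil {
		log.Fatal(err)
	}
}
```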

This test aims to confirm the following:

- A re-encrypting man-in-the-middle proxy exposing a custom signer is usable with the machine-api-controller,
  i.e. the `machine-api-controller` consumes and uses a custom PKI.
- The `machine-api-controller` deployment can respond to changes in the proxy configuration (a rough sketch of
  such a check follows this list).
- A machine set can be created and destroyed.
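
A rough sketch of how the deployment check might look. The deployment name, namespace, environment variable names, and the framework import path are assumptions for illustration, not taken from this repository:

```go
package e2e

import (
	"testing"

	"github.com/openshift/cluster-api-actuator-pkg/pkg/framework" // placeholder import path
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// checkControllerProxyEnv sketches a check that the machine-api controller
// deployment has been re-rendered with proxy environment variables after the
// cluster-wide Proxy object was updated. Names are placeholders.
func checkControllerProxyEnv(t *testing.T, c client.Client) {
	d, err := framework.GetDeployment(c, "machine-api-controllers", "openshift-machine-api")
	if err != nil {
		t.Fatalf("failed to get deployment: %v", err)
	}
	for _, container := range d.Spec.Template.Spec.Containers {
		for _, env := range container.Env {
			if env.Name == "HTTP_PROXY" || env.Name == "HTTPS_PROXY" {
				t.Logf("container %q has %s=%q", container.Name, env.Name, env.Value)
			}
		}
	}
}
```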

87 changes: 87 additions & 0 deletions pkg/framework/daemonset.go
@@ -0,0 +1,87 @@
package framework

import (
	"context"
	"fmt"

	kappsapi "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// GetDaemonset gets the DaemonSet object by name and namespace.
func GetDaemonset(c client.Client, name, namespace string) (*kappsapi.DaemonSet, error) {
	key := types.NamespacedName{
		Namespace: namespace,
		Name:      name,
	}
	d := &kappsapi.DaemonSet{}

	if err := wait.PollImmediate(RetryMedium, WaitShort, func() (bool, error) {
		if err := c.Get(context.TODO(), key, d); err != nil {
			klog.Errorf("Error querying api for DaemonSet object %q: %v, retrying...", name, err)
			return false, nil
		}
		return true, nil
	}); err != nil {
		return nil, fmt.Errorf("error getting DaemonSet %q: %v", name, err)
	}
	return d, nil
}

// DeleteDaemonset deletes the specified DaemonSet.
func DeleteDaemonset(c client.Client, daemonset *kappsapi.DaemonSet) error {
	return wait.PollImmediate(RetryMedium, WaitShort, func() (bool, error) {
		if err := c.Delete(context.TODO(), daemonset); err != nil {
			klog.Errorf("Error deleting DaemonSet object %q: %v, retrying...", daemonset.Name, err)
			return false, nil
		}
		return true, nil
	})
}

// UpdateDaemonset updates the specified DaemonSet.
func UpdateDaemonset(c client.Client, name, namespace string, updated *kappsapi.DaemonSet) error {
	return wait.PollImmediate(RetryMedium, WaitMedium, func() (bool, error) {
		d, err := GetDaemonset(c, name, namespace)
		if err != nil {
			klog.Errorf("Error getting DaemonSet: %v", err)
			return false, nil
		}
		if err := c.Patch(context.TODO(), d, client.MergeFrom(updated)); err != nil {
			klog.Errorf("Error patching DaemonSet object %q: %v, retrying...", name, err)
			return false, nil
		}
		return true, nil
	})
}

// IsDaemonsetAvailable returns true if the DaemonSet has one or more available pods.
func IsDaemonsetAvailable(c client.Client, name, namespace string) bool {
	if err := wait.PollImmediate(RetryMedium, WaitLong, func() (bool, error) {
		d, err := GetDaemonset(c, name, namespace)
		if err != nil {
			klog.Errorf("Error getting DaemonSet: %v", err)
			return false, nil
		}
		if d.Status.NumberAvailable == 0 {
			klog.Errorf("DaemonSet %q is not available. Status: %s",
				d.Name, daemonsetInfo(d))
			return false, nil
		}
		klog.Infof("DaemonSet %q is available. Status: %s",
			d.Name, daemonsetInfo(d))
		return true, nil
	}); err != nil {
		klog.Errorf("Error checking IsDaemonsetAvailable: %v", err)
		return false
	}
	return true
}

func daemonsetInfo(d *kappsapi.DaemonSet) string {
	return fmt.Sprintf("(ready: %d, available: %d, unavailable: %d)",
		d.Status.NumberReady, d.Status.NumberAvailable, d.Status.NumberUnavailable)
}
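
A hypothetical usage sketch for these helpers, written as if in the same framework package; the daemonset name and namespace are placeholders, not part of this PR:

package framework

import (
	"fmt"

	kappsapi "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForProxyDaemonset illustrates how GetDaemonset and IsDaemonsetAvailable
// might be combined by the proxy e2e test; the daemonset name and namespace
// are placeholders, not taken from this PR.
func waitForProxyDaemonset(c client.Client) (*kappsapi.DaemonSet, error) {
	if !IsDaemonsetAvailable(c, "mitm-proxy", "openshift-machine-api") {
		return nil, fmt.Errorf("proxy daemonset never reported available pods")
	}
	return GetDaemonset(c, "mitm-proxy", "openshift-machine-api")
}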
15 changes: 15 additions & 0 deletions pkg/framework/pods.go
@@ -0,0 +1,15 @@
package framework

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// GetPods returns a list of pods matching the provided selector.
func GetPods(client runtimeclient.Client, selector map[string]string) (*corev1.PodList, error) {
	pods := &corev1.PodList{}
	err := client.List(context.TODO(), pods, runtimeclient.MatchingLabels(selector))
	return pods, err
}
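
A hypothetical usage sketch for GetPods, again written as if in the framework package; the "app: mitm-proxy" label selector is a placeholder, not taken from this PR:

package framework

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// proxyPodNames lists the pods backing the test proxy daemonset by label.
// The label selector is a placeholder for illustration.
func proxyPodNames(c client.Client) ([]string, error) {
	pods, err := GetPods(c, map[string]string{"app": "mitm-proxy"})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(pods.Items))
	for _, p := range pods.Items {
		names = append(names, p.Name)
	}
	return names, nil
}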