From 765c38d9194abf18f50b00c745bd3787a4d16297 Mon Sep 17 00:00:00 2001
From: Richard Vanderpool <49568690+rvanderp3@users.noreply.github.com>
Date: Fri, 9 Oct 2020 16:34:35 -0400
Subject: [PATCH] Unit test for openshift/machine-api-operator/pull/725

---
 pkg/framework/pods.go                 |  14 ++
 pkg/framework/proxies.go              | 249 ++++++++++++++++++++++++++
 pkg/operators/machine-api-operator.go |  71 ++++++++
 3 files changed, 334 insertions(+)
 create mode 100644 pkg/framework/pods.go
 create mode 100644 pkg/framework/proxies.go

diff --git a/pkg/framework/pods.go b/pkg/framework/pods.go
new file mode 100644
index 000000000..6defd98a8
--- /dev/null
+++ b/pkg/framework/pods.go
@@ -0,0 +1,14 @@
+package framework
+
+import (
+	"context"
+
+	corev1 "k8s.io/api/core/v1"
+	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func GetPods(client runtimeclient.Client, selector map[string]string) (*corev1.PodList, error) {
+	pods := &corev1.PodList{}
+	err := client.List(context.TODO(), pods, runtimeclient.MatchingLabels(selector))
+	return pods, err
+}
diff --git a/pkg/framework/proxies.go b/pkg/framework/proxies.go
new file mode 100644
index 000000000..3eba086af
--- /dev/null
+++ b/pkg/framework/proxies.go
@@ -0,0 +1,249 @@
+package framework
+
+import (
+	"context"
+	"errors"
+
+	configv1 "github.com/openshift/api/config/v1"
+	mapiv1beta1 "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/klog"
+	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+const squidConf = `acl localnet src 192.168.0.0/16 # RFC1918 possible internal network
+acl crcnet src 10.88.0.0/16 # RFC1918 possible internal network
+acl SSL_ports port 443
+acl Safe_ports port 80 # http
+acl Safe_ports port 21 # ftp
+acl Safe_ports port 443 # https
+acl Safe_ports port 70 # gopher
+acl Safe_ports port 210 # wais
+acl Safe_ports port 1025-65535 # unregistered ports
+acl Safe_ports port 280 # http-mgmt
+acl Safe_ports port 488 # gss-http
+acl Safe_ports port 591 # filemaker
+acl Safe_ports port 777 # multiling http
+acl CONNECT method CONNECT
+http_access deny !Safe_ports
+http_access deny CONNECT !SSL_ports
+http_access allow localhost manager
+http_access deny manager
+acl mysubnet src 192.168.0.0/16
+http_access allow localhost localnet crcnet
+http_access allow all
+http_port 80
+coredump_dir /var/spool/squid
+refresh_pattern ^ftp: 1440 20% 10080
+refresh_pattern ^gopher: 1440 0% 1440
+refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
+refresh_pattern (Release|Packages(.gz)*)$ 0 20% 2880
+refresh_pattern . 0 20% 4320`
+
+// CreateProxyMachineSet creates a machineset that results in a single node being spun up. This node will not be in the worker MCP.
+func CreateProxyMachineSet(client runtimeclient.Client) (*mapiv1beta1.MachineSet, error) {
+	var machineSet *mapiv1beta1.MachineSet
+	var machineSetParams MachineSetParams
+
+	machineSetParams = BuildMachineSetParams(client, 1)
+	machineSetParams.Labels["proxy-host"] = ""
+	machineSetParams.Labels["node-role.kubernetes.io/proxy"] = ""
+	machineSet, err := CreateMachineSet(client, machineSetParams)
+	if err != nil {
+		return nil, err
+	}
+	WaitForMachineSet(client, machineSet.GetName())
+
+	nodes, err := GetNodesFromMachineSet(client, machineSet)
+	if err != nil || len(nodes) == 0 {
+		return nil, errors.New("no nodes found in machineset")
+	}
+	node := nodes[0]
+	delete(node.Labels, "node-role.kubernetes.io/worker")
+	err = client.Update(context.Background(), node)
+	return machineSet, err
+}
+
+// DeployClusterProxy deploys an HTTP proxy to the proxy node
+func DeployClusterProxy(c runtimeclient.Client) (*appsv1.Deployment, error) {
+	configMap := corev1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "squid-conf",
+			Namespace: "default",
+		},
+		Data: map[string]string{
+			"squid.conf": squidConf,
+		},
+	}
+
+	c.Create(context.Background(), &configMap)
+
+	deployment := &appsv1.Deployment{
+		TypeMeta: metav1.TypeMeta{},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "squid-proxy",
+			Namespace: "default",
+			Labels: map[string]string{
+				"app": "squid",
+			},
+		},
+		Spec: appsv1.DeploymentSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"app": "squid",
+				},
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "squid-proxy",
+					Namespace: "default",
+					Labels: map[string]string{
+						"app": "squid",
+					},
+				},
+				Spec: corev1.PodSpec{
+					Volumes: []corev1.Volume{
+						{
+							Name: "conf",
+							VolumeSource: corev1.VolumeSource{
+								ConfigMap: &corev1.ConfigMapVolumeSource{
+									LocalObjectReference: corev1.LocalObjectReference{
+										Name: "squid-conf",
+									},
+								},
+							},
+						},
+					},
+					Containers: []corev1.Container{
+						{
+							Name:  "squid",
+							Image: "sameersbn/squid:3.5.27-2",
+							Ports: []corev1.ContainerPort{
+								{
+									ContainerPort: 80,
+								},
+							},
+							VolumeMounts: []corev1.VolumeMount{
+								{
+									Name:      "conf",
+									ReadOnly:  false,
+									MountPath: "/etc/squid/squid.conf",
+									SubPath:   "squid.conf",
+								},
+							},
+							SecurityContext: &corev1.SecurityContext{},
+						},
+					},
+					NodeSelector: map[string]string{
+						"proxy-host": "",
+					},
+					HostNetwork: true,
+				},
+			},
+			Strategy: appsv1.DeploymentStrategy{
+				Type: "Recreate",
+			},
+			Paused: false,
+		},
+	}
+	c.Create(context.Background(), deployment)
+	IsDeploymentAvailable(c, deployment.Name, "default")
+	return deployment, nil
+}
+
+// DestroyClusterProxy destroys the HTTP proxy and associated configmap
+func DestroyClusterProxy(c runtimeclient.Client) error {
+	configMap := &corev1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "squid-conf",
+			Namespace: "default",
+		},
+	}
+	c.Delete(context.Background(), configMap)
+
+	return DeleteDeployment(c, &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "squid-proxy",
+			Namespace: "default",
+		},
+	})
+}
+
+// WaitForProxyInjectionSync waits for the named deployment's proxy environment variables to sync with the state of the cluster proxy
+func WaitForProxyInjectionSync(c runtimeclient.Client, name, namespace string, shouldBePresent bool) bool {
+	if err := wait.PollImmediate(RetryShort, WaitLong, func() (bool, error) {
+		deployment, err := GetDeployment(c, name, namespace)
+		if err != nil {
+			return false, nil
+		}
+		hasHttpProxy := false
+		hasHttpsProxy := false
+		hasNoProxy := false
+		for _, container := range deployment.Spec.Template.Spec.Containers {
+			for _, envVar := range container.Env {
+				switch envVar.Name {
+				case "NO_PROXY":
+					hasNoProxy = true
+				case "HTTPS_PROXY":
+					hasHttpsProxy = true
+				case "HTTP_PROXY":
+					hasHttpProxy = true
+				}
+			}
+		}
+		allSet := hasHttpProxy && hasHttpsProxy && hasNoProxy
+		anySet := hasHttpProxy || hasHttpsProxy || hasNoProxy
+		return (shouldBePresent && allSet) || (!shouldBePresent && !anySet), nil
+	}); err != nil {
+		klog.Errorf("Error waiting for proxy injection to sync: %v", err)
+		return false
+	}
+	return true
+}
+
+// GetClusterProxy fetches the global cluster proxy object.
+func GetClusterProxy(c runtimeclient.Client) (*configv1.Proxy, error) {
+	proxy := &configv1.Proxy{}
+	proxyName := runtimeclient.ObjectKey{
+		Name: GlobalInfrastuctureName, // the cluster-scoped proxy object is also named "cluster"
+	}
+
+	if err := c.Get(context.Background(), proxyName, proxy); err != nil {
+		return nil, err
+	}
+
+	return proxy, nil
+}
+
+// SetClusterProxy configures the cluster-wide proxy
+func SetClusterProxy(c runtimeclient.Client, updatedProxy configv1.Proxy) (*configv1.Proxy, error) {
+	proxy, err := GetClusterProxy(c)
+	if err != nil {
+		return nil, err
+	}
+	if updatedProxy.Spec.HTTPProxy != "" {
+		proxy.Spec.HTTPProxy = updatedProxy.Spec.HTTPProxy
+	} else {
+		proxy.Spec.HTTPProxy = ""
+	}
+	if updatedProxy.Spec.HTTPSProxy != "" {
+		proxy.Spec.HTTPSProxy = updatedProxy.Spec.HTTPSProxy
+	} else {
+		proxy.Spec.HTTPSProxy = ""
+	}
+	if updatedProxy.Spec.NoProxy != "" {
+		proxy.Spec.NoProxy = updatedProxy.Spec.NoProxy
+	} else {
+		proxy.Spec.NoProxy = ""
+	}
+	if err := c.Update(context.Background(), proxy); err != nil {
+		return nil, err
+	}
+
+	return proxy, nil
+}
diff --git a/pkg/operators/machine-api-operator.go b/pkg/operators/machine-api-operator.go
index 20ff15ff4..36153f752 100644
--- a/pkg/operators/machine-api-operator.go
+++ b/pkg/operators/machine-api-operator.go
@@ -1,11 +1,14 @@
 package operators
 
 import (
+	"context"
 	"fmt"
 
 	. "github.com/onsi/ginkgo"
"github.com/onsi/gomega" + configv1 "github.com/openshift/api/config/v1" "github.com/openshift/cluster-api-actuator-pkg/pkg/framework" + mapiv1beta1 "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" "k8s.io/utils/pointer" ) @@ -164,3 +167,71 @@ var _ = Describe("[Feature:Operators] Machine API cluster operator status should Expect(framework.IsStatusAvailable(client, "machine-api")).To(BeTrue()) }) }) + +var _ = Describe("[Feature:Operators] When cluster-wide proxy is configured, Machine API cluster operator should ", func() { + var machineSet *mapiv1beta1.MachineSet + var proxyMachineSet *mapiv1beta1.MachineSet + var machineSetParams framework.MachineSetParams + It("reflect the configured proxy in machine-api-controller", func() { + By("creating host for proxy") + client, err := framework.LoadClient() + Expect(err).NotTo(HaveOccurred()) + proxyMachineSet, err = framework.CreateProxyMachineSet(client) + Expect(err).NotTo(HaveOccurred()) + + By("deploying an HTTP proxy") + deployment, err := framework.DeployClusterProxy(client) + Expect(err).NotTo(HaveOccurred()) + Expect(framework.IsDeploymentAvailable(client, deployment.Name, deployment.Namespace)).To(BeTrue()) + + By("configuring cluster-wide proxy") + pods, err := framework.GetPods(client, map[string]string{"app": "squid"}) + Expect(err).NotTo(HaveOccurred()) + Expect(len(pods.Items) > 0).To(BeTrue()) + proxy := configv1.Proxy{} + proxy.Spec.HTTPProxy = "http://" + pods.Items[0].Status.HostIP + ":80" + proxy.Spec.HTTPSProxy = "http://" + pods.Items[0].Status.HostIP + ":80" + framework.SetClusterProxy(client, proxy) + Expect(err).NotTo(HaveOccurred()) + + By("waiting for machine-api-controller deployment to reflect configured cluster-wide proxy") + Expect(framework.WaitForProxyInjectionSync(client, maoManagedDeployment, framework.MachineAPINamespace, true)).To(BeTrue()) + }) + + It("allow a machineset to be created and destroyed", func() { + client, err := framework.LoadClient() + Expect(err).NotTo(HaveOccurred()) + + By("creating a machineset") + + machineSetParams = framework.BuildMachineSetParams(client, 3) + machineSet, err = framework.CreateMachineSet(client, machineSetParams) + Expect(err).ToNot(HaveOccurred()) + framework.WaitForMachineSet(client, machineSet.GetName()) + + By("destroying a machineset") + err = client.Delete(context.Background(), machineSet) + Expect(err).ToNot(HaveOccurred()) + framework.WaitForMachineSetDelete(client, machineSet) + }) + + It("reflect an unconfigured proxy in machine-api-controller", func() { + client, err := framework.LoadClient() + Expect(err).NotTo(HaveOccurred()) + + By("unconfiguring cluster-wide proxy") + proxy := configv1.Proxy{} + proxy.Spec.HTTPProxy = "" + proxy.Spec.HTTPSProxy = "" + framework.SetClusterProxy(client, proxy) + Expect(err).NotTo(HaveOccurred()) + + By("waiting for machine-api-controller deployment to reflect unconfigured cluster-wide proxy") + Expect(framework.WaitForProxyInjectionSync(client, maoManagedDeployment, framework.MachineAPINamespace, false)).To(BeTrue()) + framework.DestroyClusterProxy(client) + + Expect(proxyMachineSet != nil).To(BeTrue()) + framework.DeleteMachineSets(client, proxyMachineSet) + }) + +})