-
Notifications
You must be signed in to change notification settings - Fork 38.6k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
always clean gce resources in service e2e #32183
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -80,11 +80,24 @@ var _ = framework.KubeDescribe("Services", func() { | |
f := framework.NewDefaultFramework("services") | ||
|
||
var cs clientset.Interface | ||
serviceLBNames := []string{} | ||
|
||
BeforeEach(func() { | ||
cs = f.ClientSet | ||
}) | ||
|
||
AfterEach(func() { | ||
if CurrentGinkgoTestDescription().Failed { | ||
describeSvc(f.Namespace.Name) | ||
} | ||
for _, lb := range serviceLBNames { | ||
framework.Logf("cleaning gce resource for %s", lb) | ||
cleanupServiceGCEResources(lb) | ||
} | ||
//reset serviceLBNames | ||
serviceLBNames = []string{} | ||
}) | ||
|
||
// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here. | ||
|
||
It("should provide secure master service [Conformance]", func() { | ||
|
@@ -582,6 +595,10 @@ var _ = framework.KubeDescribe("Services", func() { | |
s.Spec.Type = api.ServiceTypeLoadBalancer | ||
}) | ||
} | ||
serviceLBNames = append(serviceLBNames, getLoadBalancerName(tcpService)) | ||
if loadBalancerSupportsUDP { | ||
serviceLBNames = append(serviceLBNames, getLoadBalancerName(udpService)) | ||
} | ||
|
||
By("waiting for the TCP service to have a load balancer") | ||
// Wait for the load balancer to be created asynchronously | ||
|
@@ -1083,6 +1100,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", | |
loadBalancerCreateTimeout := loadBalancerCreateTimeoutDefault | ||
|
||
var cs clientset.Interface | ||
serviceLBNames := []string{} | ||
|
||
BeforeEach(func() { | ||
// requires cloud load-balancer support - this feature currently supported only on GCE/GKE | ||
|
@@ -1094,12 +1112,25 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", | |
} | ||
}) | ||
|
||
AfterEach(func() { | ||
if CurrentGinkgoTestDescription().Failed { | ||
describeSvc(f.Namespace.Name) | ||
} | ||
for _, lb := range serviceLBNames { | ||
framework.Logf("cleaning gce resource for %s", lb) | ||
cleanupServiceGCEResources(lb) | ||
} | ||
//reset serviceLBNames | ||
serviceLBNames = []string{} | ||
}) | ||
|
||
It("should work for type=LoadBalancer [Slow][Feature:ExternalTrafficLocalOnly]", func() { | ||
namespace := f.Namespace.Name | ||
serviceName := "external-local" | ||
jig := NewServiceTestJig(cs, serviceName) | ||
|
||
svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true) | ||
serviceLBNames = append(serviceLBNames, getLoadBalancerName(svc)) | ||
healthCheckNodePort := int(service.GetServiceHealthCheckNodePort(svc)) | ||
if healthCheckNodePort == 0 { | ||
framework.Failf("Service HealthCheck NodePort was not allocated") | ||
|
@@ -1165,6 +1196,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", | |
nodes := jig.getNodes(maxNodesForEndpointsTests) | ||
|
||
svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, false) | ||
serviceLBNames = append(serviceLBNames, getLoadBalancerName(svc)) | ||
defer func() { | ||
jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout) | ||
Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) | ||
|
@@ -1224,6 +1256,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", | |
nodes := jig.getNodes(maxNodesForEndpointsTests) | ||
|
||
svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true) | ||
serviceLBNames = append(serviceLBNames, getLoadBalancerName(svc)) | ||
defer func() { | ||
jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout) | ||
Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) | ||
|
@@ -1272,6 +1305,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]", | |
} | ||
|
||
svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true) | ||
serviceLBNames = append(serviceLBNames, getLoadBalancerName(svc)) | ||
defer func() { | ||
jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout) | ||
Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) | ||
|
@@ -2697,3 +2731,33 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam | |
} | ||
return execPod.Status.PodIP, outputs[1] | ||
} | ||
|
||
func getLoadBalancerName(service *api.Service) string { | ||
//GCE requires that the name of a load balancer starts with a lower case letter. | ||
ret := "a" + string(service.UID) | ||
ret = strings.Replace(ret, "-", "", -1) | ||
//AWS requires that the name of a load balancer is shorter than 32 bytes. | ||
if len(ret) > 32 { | ||
ret = ret[:32] | ||
} | ||
return ret | ||
} | ||
|
||
func cleanupServiceGCEResources(loadBalancerName string) { | ||
if pollErr := wait.Poll(5*time.Second, lbCleanupTimeout, func() (bool, error) { | ||
if err := framework.CleanupGCEResources(loadBalancerName); err != nil { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. the way we do this in ingress e2e is first poll on the resources and wait for the servicecontroller to delete them in, say, 5m. If this happens, test pass. Otherwise test fails, but at the end we delete the resources ourselves. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @mfanjie I agree with @bprashanth - I see you have this called from AfterEac - I think we should not cleanup immediately. Lets delete the services, then keep polling to observe that servicecontroller cleans up after service deletion. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. the behavior you mentioned existing in current code. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. we're adding a test that deletes an lb service, that's why i tagged Girish: https://github.com/kubernetes/kubernetes/pull/31991/files#diff-20a4e2095b63ecd60dd25e78bcd67372R1232 There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Actually the test already exists and I think we just don't cleanup the lb (https://github.com/kubernetes/kubernetes/blob/master/test/e2e/service.go#L1087) but I didn't look too close, maybe we do in some aftereach block |
||
framework.Logf("Still waiting for glbc to cleanup: %v", err) | ||
return false, nil | ||
} | ||
return true, nil | ||
}); pollErr != nil { | ||
framework.Failf("Failed to cleanup service GCE resources.") | ||
} | ||
} | ||
|
||
func describeSvc(ns string) { | ||
framework.Logf("\nOutput of kubectl describe svc:\n") | ||
desc, _ := framework.RunKubectl( | ||
"describe", "svc", fmt.Sprintf("--namespace=%v", ns)) | ||
framework.Logf(desc) | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
do we create all health checks with the same name? I can have multiple nodePorts/health checks per service right?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
HTTP health check name == LB name. We allocate just a single healthCheckNodePort per service today (for ESIPP services).
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think this line is correct, right? i'd keep it for now.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yeah it's correct, I assumed we were doing a health check per targetPort. Girish is verifying the case of one targetPort group failing health checks and another passing, and how that manifests with a single health check per service.