From a34b9510eee3ced83965723838b89c80e6a3af31 Mon Sep 17 00:00:00 2001 From: Takaaki Nakama Date: Sat, 8 Aug 2020 00:57:30 +0900 Subject: [PATCH] feat(gce): Add GCP internal http(s) load balancer. (#4725) * fix(provider/google): add internal http(s) load balancer. (#5042) * fix(provider/gce): Fix Internal HTTP LB implementations to use java. * fix(provider/gce): Fix internal http lb cannot be deployed with other global lbs * fix(provider/gce): Fix referencing not existing property * chore(gce): no use ProviderVersion * chore(gce): null check in common place * chore(gce): set subnet purpose default value n/a * chore(gce): refactor internal http lb Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .../clouddriver/google/deploy/GCEUtil.groovy | 261 ++++ ...oadBalancerAtomicOperationConverter.groovy | 4 + ...oadBalancerAtomicOperationConverter.groovy | 4 + .../handlers/BasicGoogleDeployHandler.groovy | 20 +- ...bstractEnableDisableAtomicOperation.groovy | 39 + ...ternalHttpLoadBalancerAtomicOperation.java | 497 ++++++++ ...ternalHttpLoadBalancerAtomicOperation.java | 1073 ++++++++++++++++ ...gleLoadBalancerDescriptionValidator.groovy | 29 + .../google/model/GoogleHealthCheck.groovy | 3 + .../google/model/GoogleSubnet.groovy | 2 +- .../google/model/callbacks/Utils.groovy | 33 +- .../GoogleHttpLoadBalancingPolicy.groovy | 2 +- .../GoogleInternalHttpLoadBalancer.java | 79 ++ .../GoogleLoadBalancerType.groovy | 1 + .../GoogleLoadBalancingScheme.groovy | 1 + .../GoogleSessionAffinity.groovy | 2 + ...AbstractGoogleServerGroupCachingAgent.java | 7 + .../GoogleHealthCheckCachingAgent.groovy | 108 +- ...eInternalHttpLoadBalancerCachingAgent.java | 714 +++++++++++ .../GoogleInfrastructureProviderConfig.groovy | 5 + .../view/GoogleClusterProvider.groovy | 14 +- .../view/GoogleLoadBalancerProvider.groovy | 39 +- .../provider/view/GoogleSubnetProvider.groovy | 3 +- ...eServerGroupAtomicOperationUnitSpec.groovy | 6 +- ...eServerGroupAtomicOperationUnitSpec.groovy | 12 +- ...LoadBalancerAtomicOperationUnitSpec.groovy | 791 ++++++++++++ ...LoadBalancerAtomicOperationUnitSpec.groovy | 1104 +++++++++++++++++ 27 files changed, 4770 insertions(+), 83 deletions(-) create mode 100644 clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperation.java create mode 100644 clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperation.java create mode 100644 clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleInternalHttpLoadBalancer.java create mode 100644 clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInternalHttpLoadBalancerCachingAgent.java create mode 100644 clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy create mode 100644 clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEUtil.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEUtil.groovy index 7ba572b9a0a..b1e7a0f6643 100644 --- 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEUtil.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/GCEUtil.groovy @@ -67,6 +67,7 @@ class GCEUtil { public static final String REGIONAL_LOAD_BALANCER_NAMES = "load-balancer-names" public static final String GLOBAL_LOAD_BALANCER_NAMES = "global-load-balancer-names" public static final String BACKEND_SERVICE_NAMES = "backend-service-names" + public static final String REGION_BACKEND_SERVICE_NAMES = "region-backend-service-names" public static final String LOAD_BALANCING_POLICY = "load-balancing-policy" public static final String SELECT_ZONES = 'select-zones' public static final String AUTOSCALING_POLICY = 'autoscaling-policy' @@ -681,6 +682,10 @@ class GCEUtil { return GCE_API_PREFIX + "$projectName/global/sslCertificates/$certName" } + static String buildRegionalCertificateUrl(String projectName, String region, String certName) { + return GCE_API_PREFIX + "$projectName/regions/$region/sslCertificates/$certName" + } + static String buildHttpHealthCheckUrl(String projectName, String healthCheckName) { return GCE_API_PREFIX + "$projectName/global/httpHealthChecks/$healthCheckName" } @@ -693,6 +698,10 @@ class GCEUtil { return GCE_API_PREFIX + "$projectName/global/healthChecks/$healthCheckName" } + static String buildRegionalHealthCheckUrl(String projectName, String region, String healthCheckName) { + return GCE_API_PREFIX + "$projectName/regions/$region/healthChecks/$healthCheckName" + } + static String buildInstanceTemplateUrl(String projectName, String templateName) { return GCE_API_PREFIX + "$projectName/global/instanceTemplates/$templateName" } @@ -1092,6 +1101,72 @@ class GCEUtil { } } + static void addInternalHttpLoadBalancerBackends(Compute compute, + ObjectMapper objectMapper, + String project, + GoogleServerGroup.View serverGroup, + GoogleLoadBalancerProvider googleLoadBalancerProvider, + Task task, + String phase, + GoogleOperationPoller googleOperationPoller, + GoogleExecutorTraits executor) { + String serverGroupName = serverGroup.name + String region = serverGroup.region + Metadata instanceMetadata = serverGroup?.launchConfig?.instanceTemplate?.properties?.metadata + Map metadataMap = buildMapFromMetadata(instanceMetadata) + def internalHttpLoadBalancersInMetadata = metadataMap?.get(REGIONAL_LOAD_BALANCER_NAMES)?.tokenize(",") ?: [] + + def internalHttpLoadBalancersToAddTo = queryAllLoadBalancers(googleLoadBalancerProvider, internalHttpLoadBalancersInMetadata, task, phase) + .findAll { it.loadBalancerType == GoogleLoadBalancerType.INTERNAL_MANAGED } + if (!internalHttpLoadBalancersToAddTo) { + log.warn("Cache call missed for Internal Http load balancers ${internalHttpLoadBalancersInMetadata}, making a call to GCP") + List projectForwardingRules = executor.timeExecute( + compute.forwardingRules().list(project, region), + "compute.forwardingRules.list", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region + ).getItems() + internalHttpLoadBalancersToAddTo = projectForwardingRules.findAll { ForwardingRule forwardingRule -> + forwardingRule.name in serverGroup.loadBalancers && forwardingRule.target && + Utils.getTargetProxyType(forwardingRule.target) in [GoogleTargetProxyType.HTTP, GoogleTargetProxyType.HTTPS] + } + } + + if (internalHttpLoadBalancersToAddTo) { + String policyJson = metadataMap?.get(LOAD_BALANCING_POLICY) + if (!policyJson) { + updateStatusAndThrowNotFoundException("Load Balancing Policy not found for server 
group ${serverGroupName}", task, phase) + } + GoogleHttpLoadBalancingPolicy policy = objectMapper.readValue(policyJson, GoogleHttpLoadBalancingPolicy) + + List backendServiceNames = metadataMap?.get(REGION_BACKEND_SERVICE_NAMES)?.split(",") ?: [] + if (backendServiceNames) { + backendServiceNames.each { String backendServiceName -> + BackendService backendService = executor.timeExecute( + compute.regionBackendServices().get(project, region, backendServiceName), + "compute.regionBackendServices.get", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + Backend backendToAdd = backendFromLoadBalancingPolicy(policy) + if (serverGroup.regional) { + backendToAdd.setGroup(buildRegionalServerGroupUrl(project, serverGroup.region, serverGroupName)) + } else { + backendToAdd.setGroup(buildZonalServerGroupUrl(project, serverGroup.zone, serverGroupName)) + } + if (backendService.backends == null) { + backendService.backends = [] + } + backendService.backends << backendToAdd + def updateOp = executor.timeExecute( + compute.regionBackendServices().update(project, region, backendServiceName, backendService), + "compute.regionBackendServices.update", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL) + googleOperationPoller.waitForGlobalOperation(compute, project, updateOp.getName(), null, + task, 'compute.regionBackendService.update', phase) + task.updateStatus phase, "Enabled backend for server group ${serverGroupName} in Internal Http(s) load balancer backend service ${backendServiceName}." + } + } + } + } + static void addSslLoadBalancerBackends(Compute compute, ObjectMapper objectMapper, String project, @@ -1498,6 +1573,67 @@ class GCEUtil { } } + static void destroyInternalHttpLoadBalancerBackends(Compute compute, + String project, + GoogleServerGroup.View serverGroup, + GoogleLoadBalancerProvider googleLoadBalancerProvider, + Task task, + String phase, + GoogleOperationPoller googleOperationPoller, + GoogleExecutorTraits executor) { + def serverGroupName = serverGroup.name + def region = serverGroup.region + def httpLoadBalancersInMetadata = serverGroup?.asg?.get(REGIONAL_LOAD_BALANCER_NAMES) ?: [] + log.debug("Attempting to delete backends for ${serverGroup.name} from the following Internal Http load balancers: ${httpLoadBalancersInMetadata}") + + log.debug("Looking up the following Internal Http load balancers in the cache: ${httpLoadBalancersInMetadata}") + def foundInternalHttpLoadBalancers = googleLoadBalancerProvider.getApplicationLoadBalancers("").findAll { + it.name in serverGroup.loadBalancers && it.loadBalancerType == GoogleLoadBalancerType.INTERNAL_MANAGED + } + if (!foundInternalHttpLoadBalancers) { + log.warn("Cache call missed for Internal Http load balancers ${httpLoadBalancersInMetadata}, making a call to GCP") + List projectForwardingRules = executor.timeExecute( + compute.forwardingRules().list(project, region), + "compute.forwardingRules", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region + ).getItems() + foundInternalHttpLoadBalancers = projectForwardingRules.findAll { ForwardingRule forwardingRule -> + forwardingRule.target && Utils.getTargetProxyType(forwardingRule.target) in [GoogleTargetProxyType.HTTP, GoogleTargetProxyType.HTTPS] && + forwardingRule.name in serverGroup.loadBalancers + } + } + + def notDeleted = httpLoadBalancersInMetadata - (foundInternalHttpLoadBalancers.collect { it.name }) + if (notDeleted) { + log.warn("Could not locate the following Internal Http load balancers: ${notDeleted}. 
Proceeding with other backend deletions without mutating them.") + } + + if (foundInternalHttpLoadBalancers) { + Metadata instanceMetadata = serverGroup?.launchConfig?.instanceTemplate?.properties?.metadata + Map metadataMap = buildMapFromMetadata(instanceMetadata) + List backendServiceNames = metadataMap?.get(REGION_BACKEND_SERVICE_NAMES)?.split(",") + if (backendServiceNames) { + backendServiceNames.each { String backendServiceName -> + BackendService backendService = executor.timeExecute( + compute.regionBackendServices().get(project, region, backendServiceName), + "compute.regionBackendService.get", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + backendService?.backends?.removeAll { Backend backend -> + (getLocalName(backend.group) == serverGroupName) && + (Utils.getRegionFromGroupUrl(backend.group) == serverGroup.region) + } + def updateOp = executor.timeExecute( + compute.regionBackendServices().update(project, region, backendServiceName, backendService), + "compute.regionBackendServices.update", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + googleOperationPoller.waitForRegionalOperation(compute, project, region, updateOp.getName(), null, + task, 'compute.regionBackendService.update', phase) + task.updateStatus phase, "Deleted backend for server group ${serverGroupName} from Internal Http(s) load balancer backend service ${backendServiceName}." + } + } + } + } + static Boolean isBackendServiceInUse(List projectUrlMaps, String backendServiceName) { def defaultServicesMatch = projectUrlMaps?.findAll { UrlMap urlMap -> getLocalName(urlMap.getDefaultService()) == backendServiceName @@ -1629,6 +1765,47 @@ class GCEUtil { return retrievedTargetProxy } + def static getRegionTargetProxyFromRule(Compute compute, String project, String region, ForwardingRule forwardingRule, String phase, SafeRetry safeRetry, GoogleExecutorTraits executor) { + String target = forwardingRule.getTarget() + GoogleTargetProxyType targetProxyType = Utils.getTargetProxyType(target) + String targetProxyName = getLocalName(target) + + def operationName + def proxyGet = null + switch (targetProxyType) { + case GoogleTargetProxyType.HTTP: + proxyGet = { executor.timeExecute( + compute.regionTargetHttpProxies().get(project, region, targetProxyName), + "compute.regionTargetHttpProxies.get", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL) + } + operationName = "compute.regionTargetHttpProxies.get" + break + case GoogleTargetProxyType.HTTPS: + proxyGet = { executor.timeExecute( + compute.regionTargetHttpsProxies().get(project, region, targetProxyName), + "compute.regionTargetHttpsProxies.get", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL) + } + operationName = "compute.regionTargetHttpsProxies.get" + break + default: + log.warn("Unexpected target proxy type for $targetProxyName in $region.") + return null + break + } + def retrievedTargetProxy = safeRetry.doRetry( + proxyGet, + "Region Target proxy $targetProxyName", + null, + [400, 403, 412], + [], + [action: "get", phase: phase, operation: operationName, (executor.TAG_SCOPE): executor.SCOPE_REGIONAL, (executor.TAG_REGION): region], + executor.registry + ) + return retrievedTargetProxy + } + /** * Deletes an L7/SSL LB global listener, i.e. a global forwarding rule and its target proxy. 
* @param compute @@ -1718,6 +1895,72 @@ class GCEUtil { return result } } + static Operation deleteRegionalListener(Compute compute, + String project, + String region, + String forwardingRuleName, + String phase, + SafeRetry safeRetry, + GoogleExecutorTraits executor) { + ForwardingRule ruleToDelete = safeRetry.doRetry( + { executor.timeExecute( + compute.forwardingRules().get(project, region, forwardingRuleName), + "compute.forwardingRules.get", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + }, + "forwarding rule ${forwardingRuleName}", + null, + [400, 412], + [404], + [action: "get", phase: phase, operation: "compute.forwardingRules.get", (executor.TAG_SCOPE): executor.SCOPE_REGIONAL, (executor.TAG_REGION): region], + executor.registry + ) as ForwardingRule + if (ruleToDelete) { + def operation_name + executor.timeExecute( + compute.forwardingRules().delete(project, region, ruleToDelete.getName()), + "compute.forwardingRules.delete", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + String targetProxyLink = ruleToDelete.getTarget() + String targetProxyName = getLocalName(targetProxyLink) + GoogleTargetProxyType targetProxyType = Utils.getTargetProxyType(targetProxyLink) + Closure deleteProxyClosure = { null } + switch (targetProxyType) { + case GoogleTargetProxyType.HTTP: + deleteProxyClosure = { + executor.timeExecute( + compute.regionTargetHttpProxies().delete(project, region, targetProxyName), + "compute.regionTargetHttpProxies.delete", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + } + operation_name = "compute.regionTargetHttpProxies.delete" + break + case GoogleTargetProxyType.HTTPS: + deleteProxyClosure = { + executor.timeExecute( + compute.regionTargetHttpsProxies().delete(project, region, targetProxyName), + "compute.regionTargetHttpsProxies.delete", + executor.TAG_SCOPE, executor.SCOPE_REGIONAL, executor.TAG_REGION, region) + } + operation_name = "compute.regionTargetHttpsProxies.delete" + break + default: + log.warn("Unexpected target proxy type for $targetProxyName.") + break + } + + Operation result = safeRetry.doRetry( + deleteProxyClosure, + "region target proxy ${targetProxyName}", + null, + [400, 412], + [404], + [action: "delete", phase: phase, operation: operation_name, (executor.TAG_SCOPE): executor.SCOPE_REGIONAL, (executor.TAG_REGION): region], + executor.registry + ) as Operation + return result + } + } static Operation deleteIfNotInUse(Closure closure, String component, @@ -1970,6 +2213,24 @@ class GCEUtil { return healthChecks } + + static List fetchRegionalHealthChecks(GoogleExecutorTraits agent, Compute compute, String project, String region) { + Boolean executedAtLeastOnce = false + String nextPageToken = null + List healthChecks = [] + while (!executedAtLeastOnce || nextPageToken) { + HealthCheckList healthCheckList = agent.timeExecute( + compute.regionHealthChecks().list(project, region).setPageToken(nextPageToken), + "compute.regionHealthChecks.list", + agent.TAG_SCOPE, agent.SCOPE_REGIONAL, agent.TAG_REGION, region) + + executedAtLeastOnce = true + nextPageToken = healthCheckList.getNextPageToken() + healthChecks.addAll(healthCheckList.getItems() ?: []) + } + return healthChecks + } + static List fetchInstances(GoogleExecutorTraits agent, GoogleNamedAccountCredentials credentials) { List instances = new ArrayList() String pageToken = null diff --git 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleLoadBalancerAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleLoadBalancerAtomicOperationConverter.groovy index 3f0488ad3ee..a79c6300fa4 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleLoadBalancerAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/DeleteGoogleLoadBalancerAtomicOperationConverter.groovy @@ -19,6 +19,7 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.DeleteGoogleHttpLoadBalancerAtomicOperation +import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.DeleteGoogleInternalHttpLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.DeleteGoogleInternalLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.DeleteGoogleLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.DeleteGoogleSslLoadBalancerAtomicOperation @@ -42,6 +43,9 @@ class DeleteGoogleLoadBalancerAtomicOperationConverter extends AbstractAtomicOpe case GoogleLoadBalancerType.HTTP: return new DeleteGoogleHttpLoadBalancerAtomicOperation(description) break + case GoogleLoadBalancerType.INTERNAL_MANAGED: + return new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + break case GoogleLoadBalancerType.INTERNAL: return new DeleteGoogleInternalLoadBalancerAtomicOperation(description) break diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleLoadBalancerAtomicOperationConverter.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleLoadBalancerAtomicOperationConverter.groovy index 17474fffbe0..d8f4badfcde 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleLoadBalancerAtomicOperationConverter.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/converters/UpsertGoogleLoadBalancerAtomicOperationConverter.groovy @@ -19,6 +19,7 @@ package com.netflix.spinnaker.clouddriver.google.deploy.converters import com.netflix.spinnaker.clouddriver.google.GoogleOperation import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleLoadBalancerDescription import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.UpsertGoogleHttpLoadBalancerAtomicOperation +import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.UpsertGoogleInternalHttpLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.UpsertGoogleInternalLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.UpsertGoogleLoadBalancerAtomicOperation import com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.UpsertGoogleSslLoadBalancerAtomicOperation @@ -41,6 +42,9 @@ class UpsertGoogleLoadBalancerAtomicOperationConverter extends AbstractAtomicOpe 
case GoogleLoadBalancerType.HTTP: return new UpsertGoogleHttpLoadBalancerAtomicOperation(description) break + case GoogleLoadBalancerType.INTERNAL_MANAGED: + return new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description) + break case GoogleLoadBalancerType.INTERNAL: return new UpsertGoogleInternalLoadBalancerAtomicOperation(description) break diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/handlers/BasicGoogleDeployHandler.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/handlers/BasicGoogleDeployHandler.groovy index 04734230491..a522bc25783 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/handlers/BasicGoogleDeployHandler.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/handlers/BasicGoogleDeployHandler.groovy @@ -65,6 +65,7 @@ import org.springframework.stereotype.Component import static com.google.common.base.Preconditions.checkArgument import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.BACKEND_SERVICE_NAMES +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGION_BACKEND_SERVICE_NAMES import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.GLOBAL_LOAD_BALANCER_NAMES import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.LOAD_BALANCING_POLICY import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGIONAL_LOAD_BALANCER_NAMES @@ -190,6 +191,7 @@ class BasicGoogleDeployHandler implements DeployHandler regionBackendServicesToUpdate = [] - if (internalLoadBalancers) { + if (internalLoadBalancers || internalHttpLoadBalancers) { List existingRegionalLbs = instanceMetadata[REGIONAL_LOAD_BALANCER_NAMES]?.split(",") ?: [] - def ilbServices = internalLoadBalancers.collect { it.backendService.name } - def ilbNames = internalLoadBalancers.collect { it.name } + def ilbServices = internalLoadBalancers.collect { it.backendService.name } + (instanceMetadata[REGION_BACKEND_SERVICE_NAMES]?.split(",") as List) ?: [] + def ilbNames = internalLoadBalancers.collect { it.name } + internalHttpLoadBalancers.collect { it.name } ilbNames.each { String ilbName -> if (!(ilbName in existingRegionalLbs)) { @@ -488,7 +492,7 @@ class BasicGoogleDeployHandler implements DeployHandler namedPorts = [] def sourceGroupName = description?.source?.serverGroupName @@ -517,8 +521,8 @@ class BasicGoogleDeployHandler implements DeployHandler safeRetry.doRetry( updateRegionBackendServices(compute, project, region, backendService.name, backendService), diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbstractEnableDisableAtomicOperation.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbstractEnableDisableAtomicOperation.groovy index 61b6aa46c2d..4c420165dc3 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbstractEnableDisableAtomicOperation.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/AbstractEnableDisableAtomicOperation.groovy @@ -129,6 +129,18 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio registry ) + task.updateStatus phaseName, "Deregistering server group from Internal Http(s) load balancers..." 
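+      // GCEUtil.destroyInternalHttpLoadBalancerBackends is wrapped in a closure below and retried via safeRetry, tagged with the regional scope for metrics.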
+ + safeRetry.doRetry( + destroyInternalHttpLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName), + "Internal Http load balancer backends", + task, + RETRY_ERROR_CODES, + SUCCESSFUL_ERROR_CODES, + [operation: "destroyInternalHttpLoadBalancerBackends", action: "destroy", phase: phaseName, (TAG_SCOPE): SCOPE_REGIONAL, (TAG_REGION): region], + registry + ) + task.updateStatus phaseName, "Deregistering server group from internal load balancers..." safeRetry.doRetry( @@ -239,6 +251,18 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio registry ) + task.updateStatus phaseName, "Registering server group with Internal Http(s) load balancers..." + + safeRetry.doRetry( + addInternalHttpLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName), + "Internal Http load balancer backends", + task, + RETRY_ERROR_CODES, + [], + [operation: "addInternalHttpLoadBalancerBackends", action: "add", phase: phaseName, (TAG_SCOPE): SCOPE_REGIONAL, (TAG_REGION): region], + registry + ) + task.updateStatus phaseName, "Registering server group with Internal load balancers..." safeRetry.doRetry( @@ -383,6 +407,14 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio } } + + Closure destroyInternalHttpLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { + return { + GCEUtil.destroyInternalHttpLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) + null + } + } + Closure destroyInternalLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { return { GCEUtil.destroyInternalLoadBalancerBackends(compute, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) @@ -411,6 +443,13 @@ abstract class AbstractEnableDisableAtomicOperation extends GoogleAtomicOperatio } } + Closure addInternalHttpLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { + return { + GCEUtil.addInternalHttpLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) + null + } + } + Closure addSslLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName) { return { GCEUtil.addSslLoadBalancerBackends(compute, objectMapper, project, serverGroup, googleLoadBalancerProvider, task, phaseName, googleOperationPoller, this) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperation.java b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperation.java new file mode 100644 index 00000000000..742617cf5c5 --- /dev/null +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperation.java @@ -0,0 +1,497 @@ +package com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer; + +import static java.lang.String.format; + +import com.google.api.client.json.GenericJson; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.*; +import com.google.common.collect.ImmutableList; +import 
com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry; +import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleLoadBalancerDescription; +import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationException; +import com.netflix.spinnaker.clouddriver.googlecommon.deploy.GoogleApiException; +import groovy.lang.Closure; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.*; +import org.springframework.beans.factory.annotation.Autowired; + +public class DeleteGoogleInternalHttpLoadBalancerAtomicOperation + extends DeleteGoogleLoadBalancerAtomicOperation { + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + private static void addServicesFromPathMatchers( + List<String> backendServiceUrls, List<PathMatcher> pathMatchers) { + if (pathMatchers == null) return; + for (PathMatcher pathMatcher : pathMatchers) { + backendServiceUrls.add(pathMatcher.getDefaultService()); + for (PathRule pathRule : pathMatcher.getPathRules()) { + backendServiceUrls.add(pathRule.getService()); + } + } + } + + private static final String BASE_PHASE = "DELETE_INTERNAL_HTTP_LOAD_BALANCER"; + @Autowired private SafeRetry safeRetry; + @Autowired private GoogleOperationPoller googleOperationPoller; + private DeleteGoogleLoadBalancerDescription description; + + public DeleteGoogleInternalHttpLoadBalancerAtomicOperation( + DeleteGoogleLoadBalancerDescription description) { + this.description = description; + } + + /** + * curl -X POST -H "Content-Type: application/json" -d '[ { "deleteLoadBalancer": { "credentials": + * "my-account-name", "loadBalancerName": "spin-lb", "region": "us-central1", "deleteHealthChecks": false, + * "loadBalancerType": "INTERNAL_MANAGED"}} ]' localhost:7002/gce/ops + */ + @Override + public Void operate(List priorOutputs) { + getTask() + .updateStatus( + BASE_PHASE, + format( + "Initializing deletion of Internal HTTP load balancer %s...", + description.getLoadBalancerName())); + + if (description.getCredentials() == null) { + throw new IllegalArgumentException( + format( + "Unable to resolve credentials for Google account '%s'.", + description.getAccountName())); + } + + Compute compute = description.getCredentials().getCompute(); + String project = description.getCredentials().getProject(); + String region = description.getRegion(); + String forwardingRuleName = description.getLoadBalancerName(); + + // First we look everything up. Then, we call delete on all of it. Finally, we wait (with + // timeout) for all to complete. + // Start with the forwarding rule. 
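+    // The named forwarding rule is looked up regionally; its target proxy yields the URL map, and every other INTERNAL_MANAGED forwarding rule that points at the same URL map is collected as a listener to delete.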
+ getTask() + .updateStatus( + BASE_PHASE, + "Retrieving forwarding rule " + forwardingRuleName + " in " + region + "..."); + + List projectForwardingRules = null; + try { + projectForwardingRules = + timeExecute( + compute.forwardingRules().list(project, region), + "compute.forwardingRules.list", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region) + .getItems(); + + ForwardingRule forwardingRule = + projectForwardingRules.stream() + .filter(f -> f.getName().equals(forwardingRuleName)) + .findFirst() + .orElse(null); + if (forwardingRule == null) { + GCEUtil.updateStatusAndThrowNotFoundException( + "Forwarding rule " + forwardingRuleName + " not found in " + region + " for " + project, + getTask(), + BASE_PHASE); + } + + String targetProxyName = GCEUtil.getLocalName(forwardingRule.getTarget()); + // Target HTTP(S) proxy. + getTask().updateStatus(BASE_PHASE, "Retrieving target proxy " + targetProxyName + "..."); + + GenericJson retrievedTargetProxy = + (GenericJson) + GCEUtil.getRegionTargetProxyFromRule( + compute, project, region, forwardingRule, BASE_PHASE, safeRetry, this); + + if (retrievedTargetProxy == null) { + GCEUtil.updateStatusAndThrowNotFoundException( + "Target proxy " + targetProxyName + " not found for " + project + " in " + region, + getTask(), + BASE_PHASE); + } + + final String urlMapName = GCEUtil.getLocalName((String) retrievedTargetProxy.get("urlMap")); + + final List listenersToDelete = new ArrayList(); + for (ForwardingRule rule : projectForwardingRules) { + if (!rule.getLoadBalancingScheme().equals("INTERNAL_MANAGED")) continue; + + try { + GenericJson proxy = + (GenericJson) + GCEUtil.getRegionTargetProxyFromRule( + compute, + project, + region, + rule, + BASE_PHASE, + getSafeRetry(), + DeleteGoogleInternalHttpLoadBalancerAtomicOperation.this); + if (GCEUtil.getLocalName((proxy == null ? null : (String) proxy.get("urlMap"))) + .equals(urlMapName)) { + listenersToDelete.add(rule.getName()); + } + } catch (GoogleOperationException e) { + // 404 is thrown if the target proxy does not exist. + // We can ignore 404's here because we are iterating over all forwarding rules and some + // other process may have + // deleted the target proxy between the time we queried for the list of forwarding rules + // and now. + // Any other exception needs to be propagated. + if (!(e.getCause() instanceof GoogleApiException.NotFoundException)) { + throw e; + } + } + } + + // URL map. + getTask().updateStatus(BASE_PHASE, "Retrieving URL map " + urlMapName + "..."); + + // NOTE: This call is necessary because we cross-check backend services later. + UrlMapList mapList = + timeExecute( + compute.regionUrlMaps().list(project, region), + "compute.regionUrlMaps.list", + TAG_SCOPE, + SCOPE_REGIONAL); + List projectUrlMaps = mapList.getItems(); + UrlMap urlMap = + projectUrlMaps.stream() + .filter(u -> u.getName().equals(urlMapName)) + .findFirst() + .orElseThrow( + () -> new IllegalStateException(format("urlMap %s not found.", urlMapName))); + projectUrlMaps.removeIf(u -> u.getName().equals(urlMapName)); + + List backendServiceUrls = new ArrayList<>(); + backendServiceUrls.add(urlMap.getDefaultService()); + addServicesFromPathMatchers(backendServiceUrls, urlMap.getPathMatchers()); + backendServiceUrls = ImmutableSet.copyOf(backendServiceUrls).asList(); + + // Backend services. Also, get health check URLs. 
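+      // Each backend service referenced by the URL map is fetched so the operation can fail fast while server groups are still attached, and so its health check URLs can be collected for the optional health check deletion below.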
+ Set healthCheckUrls = new HashSet<>(); + for (String backendServiceUrl : backendServiceUrls) { + final String backendServiceName = GCEUtil.getLocalName(backendServiceUrl); + getTask() + .updateStatus( + BASE_PHASE, + "Retrieving backend service " + backendServiceName + " in " + region + "..."); + + BackendService backendService = + safeRetry.doRetry( + new Closure(this, this) { + @Override + public BackendService call() { + try { + return timeExecute( + compute.regionBackendServices().get(project, region, backendServiceName), + "compute.regionBackendServices.get", + TAG_SCOPE, + SCOPE_REGIONAL); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }, + "Region Backend service " + backendServiceName, + getTask(), + ImmutableList.of(400, 403, 412), + new ArrayList<>(), + ImmutableMap.of( + "action", + "get", + "phase", + BASE_PHASE, + "operation", + "compute.backendServices.get", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region), + getRegistry()); + + if (backendService == null) continue; + + if (backendService.getBackends() != null && backendService.getBackends().size() > 0) { + getTask() + .updateStatus( + BASE_PHASE, + "Server groups still associated with Internal Http(s) load balancer " + + description.getLoadBalancerName() + + ". Failing..."); + throw new IllegalStateException( + "Server groups still associated with Internal Http(s) load balancer: " + + description.getLoadBalancerName() + + "."); + } + + healthCheckUrls.addAll(backendService.getHealthChecks()); + } + + final Long timeoutSeconds = description.getDeleteOperationTimeoutSeconds(); + + for (String ruleName : listenersToDelete) { + getTask() + .updateStatus(BASE_PHASE, "Deleting listener " + ruleName + " in " + region + "..."); + + Operation operation = + GCEUtil.deleteRegionalListener( + compute, + project, + region, + ruleName, + BASE_PHASE, + getSafeRetry(), + DeleteGoogleInternalHttpLoadBalancerAtomicOperation.this); + + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + operation.getName(), + timeoutSeconds, + getTask(), + "listener " + ruleName, + BASE_PHASE); + } + + getTask() + .updateStatus(BASE_PHASE, "Deleting URL map " + urlMapName + " in " + region + "..."); + Operation deleteUrlMapOperation = + safeRetry.doRetry( + new Closure(this, this) { + @Override + public Operation call() { + try { + return timeExecute( + compute.regionUrlMaps().delete(project, region, urlMapName), + "compute.regionUrlMaps.delete", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }, + "Url map " + urlMapName, + getTask(), + ImmutableList.of(400, 403, 412), + ImmutableList.of(404), + ImmutableMap.of( + "action", + "delete", + "phase", + BASE_PHASE, + "operation", + "compute.regionUrlMaps.delete", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region), + getRegistry()); + + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + deleteUrlMapOperation.getName(), + timeoutSeconds, + getTask(), + "Regional url map " + urlMapName, + BASE_PHASE); + + // We make a list of the delete operations for backend services. 
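+      // deleteIfNotInUse may return no operation for a given backend service; only deletes that were actually issued are tracked here and polled to completion below.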
+ List deleteBackendServiceAsyncOperations = + new ArrayList<>(); + for (String backendServiceUrl : backendServiceUrls) { + final String backendServiceName = GCEUtil.getLocalName(backendServiceUrl); + Operation deleteBackendServiceOp = + GCEUtil.deleteIfNotInUse( + new Closure(this, this) { + @Override + public Operation call() { + try { + return timeExecute( + compute + .regionBackendServices() + .delete(project, region, backendServiceName), + "compute.regionBackendServices.delete", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }, + "Backend service " + backendServiceName, + project, + getTask(), + ImmutableMap.of( + "action", + "delete", + "operation", + "compute.regionBackendServices.delete", + "phase", + BASE_PHASE, + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region), + safeRetry, + this); + if (deleteBackendServiceOp != null) { + deleteBackendServiceAsyncOperations.add( + new BackendServiceAsyncDeleteOperation( + backendServiceName, deleteBackendServiceOp.getName())); + } + } + + // Wait on all of these deletes to complete. + for (BackendServiceAsyncDeleteOperation asyncOperation : + deleteBackendServiceAsyncOperations) { + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + asyncOperation.getOperationName(), + timeoutSeconds, + getTask(), + "Region backend service " + asyncOperation.getBackendServiceName(), + BASE_PHASE); + } + + // Now make a list of the delete operations for health checks if description says to do so. + if (description.getDeleteHealthChecks()) { + List deleteHealthCheckAsyncOperations = new ArrayList<>(); + for (String healthCheckUrl : healthCheckUrls) { + final String healthCheckName = GCEUtil.getLocalName(healthCheckUrl); + Operation deleteHealthCheckOp = + GCEUtil.deleteIfNotInUse( + new Closure(this, this) { + @Override + public Operation call() { + try { + return timeExecute( + compute.regionHealthChecks().delete(project, region, healthCheckName), + "compute.regionHealthChecks.delete", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }, + "Region Http health check " + healthCheckName, + project, + getTask(), + ImmutableMap.of( + "action", + "delete", + "operation", + "compute.regionHealthChecks.delete", + "phase", + BASE_PHASE, + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region), + safeRetry, + this); + if (deleteHealthCheckOp != null) { + deleteHealthCheckAsyncOperations.add( + new HealthCheckAsyncDeleteOperation( + healthCheckName, deleteHealthCheckOp.getName())); + } + } + + // Finally, wait on all of these deletes to complete. 
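+        // Health checks that deleteIfNotInUse declined to delete (for example, ones still referenced elsewhere) yield no operation and are left in place; only issued deletes are polled.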
+ for (HealthCheckAsyncDeleteOperation asyncOperation : deleteHealthCheckAsyncOperations) { + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + asyncOperation.getOperationName(), + timeoutSeconds, + getTask(), + "region health check " + asyncOperation.getHealthCheckName(), + BASE_PHASE); + } + } + + getTask() + .updateStatus( + BASE_PHASE, + "Done deleting internal http load balancer " + + description.getLoadBalancerName() + + " in " + + region + + "."); + return null; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public SafeRetry getSafeRetry() { + return safeRetry; + } + + public void setSafeRetry(SafeRetry safeRetry) { + this.safeRetry = safeRetry; + } + + public static class HealthCheckAsyncDeleteOperation { + public HealthCheckAsyncDeleteOperation(String healthCheckName, String operationName) { + this.healthCheckName = healthCheckName; + this.operationName = operationName; + } + + public String getHealthCheckName() { + return healthCheckName; + } + + public String getOperationName() { + return operationName; + } + + private String healthCheckName; + private String operationName; + } + + public static class BackendServiceAsyncDeleteOperation { + public BackendServiceAsyncDeleteOperation(String backendServiceName, String operationName) { + this.backendServiceName = backendServiceName; + this.operationName = operationName; + } + + public String getBackendServiceName() { + return backendServiceName; + } + + public String getOperationName() { + return operationName; + } + + private String backendServiceName; + private String operationName; + } +} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperation.java b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperation.java new file mode 100644 index 00000000000..b1ffd1a49bb --- /dev/null +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperation.java @@ -0,0 +1,1073 @@ +package com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer; + +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGIONAL_LOAD_BALANCER_NAMES; +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGION_BACKEND_SERVICE_NAMES; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toSet; + +import com.google.api.client.googleapis.json.GoogleJsonResponseException; +import com.google.api.client.json.GenericJson; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.*; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; +import com.netflix.spinnaker.clouddriver.data.task.Task; +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository; +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil; +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller; +import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry; +import com.netflix.spinnaker.clouddriver.google.deploy.description.BaseGoogleInstanceDescription; +import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleLoadBalancerDescription; +import 
com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationException; +import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck; +import com.netflix.spinnaker.clouddriver.google.model.GoogleNetwork; +import com.netflix.spinnaker.clouddriver.google.model.GoogleSubnet; +import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.*; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleNetworkProvider; +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleSubnetProvider; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperation; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationConverter; +import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperationsRegistry; +import com.netflix.spinnaker.clouddriver.orchestration.OrchestrationProcessor; +import groovy.lang.Closure; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.*; +import java.util.stream.Collectors; +import org.apache.commons.lang3.StringUtils; +import org.codehaus.groovy.runtime.StringGroovyMethods; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; + +public class UpsertGoogleInternalHttpLoadBalancerAtomicOperation + extends UpsertGoogleLoadBalancerAtomicOperation { + private static Task getTask() { + return TaskRepository.threadLocalTask.get(); + } + + private static final Logger log = LoggerFactory.getLogger(GoogleInternalHttpLoadBalancer.class); + private static final String BASE_PHASE = "UPSERT_INTERNAL_HTTP_LOAD_BALANCER"; + private static final String PATH_MATCHER_PREFIX = "pm"; + public static final String TARGET_HTTP_PROXY_NAME_PREFIX = "target-http-proxy"; + public static final String TARGET_HTTPS_PROXY_NAME_PREFIX = "target-https-proxy"; + @Autowired private GoogleOperationPoller googleOperationPoller; + @Autowired private AtomicOperationsRegistry atomicOperationsRegistry; + @Autowired private GoogleNetworkProvider googleNetworkProvider; + @Autowired private GoogleSubnetProvider googleSubnetProvider; + @Autowired private OrchestrationProcessor orchestrationProcessor; + @Autowired private SafeRetry safeRetry; + private final UpsertGoogleLoadBalancerDescription description; + + public UpsertGoogleInternalHttpLoadBalancerAtomicOperation( + UpsertGoogleLoadBalancerDescription description) { + this.description = description; + } + + /** + * minimal command: curl -v -X POST -H "Content-Type: application/json" -d '[{ + * "upsertLoadBalancer": {"credentials": "my-google-account", "loadBalancerType": + * "INTERNAL_MANAGED", "loadBalancerName": "internal-http-create", "portRange": "80", + * "backendServiceDiff": [], "defaultService": {"name": "default-backend-service", "backends": [], + * "healthCheck": {"name": "basic-check", "requestPath": "/", "port": 80, "checkIntervalSec": 1, + * "timeoutSec": 1, "healthyThreshold": 1, "unhealthyThreshold": 1}}, "certificate": "", + * "hostRules": [] }}]' localhost:7002/gce/ops + * + *

full command: curl -v -X POST -H "Content-Type: application/json" -d '[{ + * "upsertLoadBalancer": {"credentials": "my-google-account", "loadBalancerType": + * "INTERNAL_MANAGED", "loadBalancerName": "internal-http-create", "portRange": "80", + * "backendServiceDiff": [], "defaultService": {"name": "default-backend-service", "backends": [], + * "healthCheck": {"name": "basic-check", "requestPath": "/", "port": 80, "checkIntervalSec": 1, + * "timeoutSec": 1, "healthyThreshold": 1, "unhealthyThreshold": 1}}, "certificate": "", + * "hostRules": [{"hostPatterns": ["host1.com", "host2.com"], "pathMatcher": {"pathRules": + * [{"paths": ["/path", "/path2/more"], "backendService": {"name": "backend-service", "backends": + * [], "healthCheck": {"name": "health-check", "requestPath": "/", "port": 80, "checkIntervalSec": + * 1, "timeoutSec": 1, "healthyThreshold": 1, "unhealthyThreshold": 1}}}], "defaultService": + * {"name": "pm-backend-service", "backends": [], "healthCheck": {"name": "derp-check", + * "requestPath": "/", "port": 80, "checkIntervalSec": 1, "timeoutSec": 1, "healthyThreshold": 1, + * "unhealthyThreshold": 1}}}}]}}]' localhost:7002/gce/ops + * + * @param description + * @param priorOutputs + * @return + */ + @Override + public Map operate(List priorOutputs) { + GoogleNetwork network = + GCEUtil.queryNetwork( + description.getAccountName(), + description.getNetwork(), + getTask(), + BASE_PHASE, + googleNetworkProvider); + GoogleSubnet subnet = + GCEUtil.querySubnet( + description.getAccountName(), + description.getRegion(), + description.getSubnet(), + getTask(), + BASE_PHASE, + googleSubnetProvider); + GoogleInternalHttpLoadBalancer internalHttpLoadBalancer = new GoogleInternalHttpLoadBalancer(); + + internalHttpLoadBalancer.setName(description.getLoadBalancerName()); + internalHttpLoadBalancer.setUrlMapName(description.getUrlMapName()); + internalHttpLoadBalancer.setDefaultService(description.getDefaultService()); + internalHttpLoadBalancer.setHostRules( + description.getHostRules() != null ? description.getHostRules() : new ArrayList<>()); + internalHttpLoadBalancer.setCertificate(description.getCertificate()); + internalHttpLoadBalancer.setIpAddress(description.getIpAddress()); + internalHttpLoadBalancer.setIpProtocol(description.getIpProtocol()); + internalHttpLoadBalancer.setNetwork(network.getSelfLink()); + internalHttpLoadBalancer.setSubnet(subnet.getSelfLink()); + internalHttpLoadBalancer.setPortRange(description.getPortRange()); + + String internalHttpLoadBalancerName = internalHttpLoadBalancer.getName(); + + getTask() + .updateStatus( + BASE_PHASE, + "Initializing upsert of Internal HTTP load balancer " + + internalHttpLoadBalancerName + + "..."); + + if (description.getCredentials() == null) { + throw new IllegalArgumentException( + "Unable to resolve credentials for Google account '" + + description.getAccountName() + + "'."); + } + + Compute compute = description.getCredentials().getCompute(); + String project = description.getCredentials().getProject(); + String region = description.getRegion(); + + // Step 0: Set up state to formulate a plan for creating or updating the L7 LB. + + Set healthCheckExistsSet = new HashSet<>(); + Set healthCheckNeedsUpdatedSet = new HashSet<>(); + Set serviceExistsSet = new HashSet<>(); + Set serviceNeedsUpdatedSet = new HashSet<>(); + boolean urlMapExists; + boolean targetProxyExists = false; + boolean targetProxyNeedsUpdated = false; + boolean forwardingRuleExists; + + // The following are unique on object equality, not just name. 
This lets us check if a + // service/hc exists or + // needs updated by _name_ later. + List backendServicesFromDescription = + ImmutableSet.copyOf( + Utils.getBackendServicesFromInternalHttpLoadBalancerView( + internalHttpLoadBalancer.getView())) + .asList(); + List healthChecksFromDescription = + backendServicesFromDescription.stream() + .map(GoogleBackendService::getHealthCheck) + .distinct() + .collect(toList()); + + final String name = internalHttpLoadBalancer.getUrlMapName(); + String urlMapName = + name != null + ? name + : internalHttpLoadBalancerName; // An L7 load balancer is identified by its UrlMap name + // in Google Cloud Console. + + // Get all the existing infrastructure. + + // Look up the legacy health checks so we can do the work to transition smoothly to the UHCs. + try { + List existingHealthChecks = + timeExecute( + compute.regionHealthChecks().list(project, region), + "compute.regionHealthChecks.list", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region) + .getItems(); + List existingServices = + timeExecute( + compute.regionBackendServices().list(project, region), + "compute.regionBackendServices.list", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region) + .getItems(); + UrlMap existingUrlMap = null; + try { + existingUrlMap = + timeExecute( + compute.regionUrlMaps().get(project, region, urlMapName), + "compute.regionUrlMaps.get", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } catch (GoogleJsonResponseException e) { + // 404 is thrown if the url map doesn't exist. Any other exception needs to be propagated. + if (e.getStatusCode() != 404) { + throw e; + } + } + + // Determine if the infrastructure in the description exists already. + // If it does, check and see if we need to update it from the description. 
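+      // The *ExistsSet collections record which named resources already exist in GCE; the *NeedsUpdatedSet collections record which of those differ from the description and must be updated in step 1.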
+ + // UrlMap + urlMapExists = existingUrlMap != null; + + // ForwardingRule + ForwardingRule existingRule = null; + try { + existingRule = + timeExecute( + compute.forwardingRules().get(project, region, internalHttpLoadBalancerName), + "compute.forwardingRules.get", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } catch (GoogleJsonResponseException e) { + if (e.getStatusCode() != 404) { + throw e; + } + } + + forwardingRuleExists = existingRule != null; + + // TargetProxy + GenericJson existingProxy = null; + if (forwardingRuleExists) { + String targetProxyName = GCEUtil.getLocalName(existingRule.getTarget()); + switch (Utils.getTargetProxyType(existingRule.getTarget())) { + case HTTP: + existingProxy = + timeExecute( + compute.regionTargetHttpProxies().get(project, region, targetProxyName), + "compute.regionTargetHttpProxies.get", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + break; + case HTTPS: + existingProxy = + timeExecute( + compute.regionTargetHttpsProxies().get(project, region, targetProxyName), + "compute.regionTargetHttpsProxies.get", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + if (!StringGroovyMethods.asBoolean(internalHttpLoadBalancer.getCertificate())) { + throw new IllegalArgumentException( + internalHttpLoadBalancerName + + " is an Https load balancer, but the upsert description does not contain a certificate."); + } + + targetProxyNeedsUpdated = + !GCEUtil.getLocalName( + ((TargetHttpsProxy) existingProxy).getSslCertificates().get(0)) + .equals( + GCEUtil.getLocalName( + GCEUtil.buildCertificateUrl( + project, internalHttpLoadBalancer.getCertificate()))); + break; + default: + log.warn("Unexpected target proxy type for " + targetProxyName + "."); + break; + } + targetProxyExists = existingProxy != null; + if (targetProxyExists + && !GCEUtil.getLocalName((String) existingProxy.get("urlMap")) + .equals(description.getUrlMapName())) { + throw new IllegalStateException( + "Listener with name " + + existingRule.getName() + + " already exists and points to url map: " + + GCEUtil.getLocalName((String) existingProxy.get("urlMap")) + + "," + + " which is different from the description url map: " + + description.getUrlMapName() + + "."); + } + } + + // HealthChecks + if (healthChecksFromDescription.size() + != healthChecksFromDescription.stream() + .map(GoogleHealthCheck::getName) + .distinct() + .count()) { + throw new GoogleOperationException( + "Duplicate health checks with different attributes in the description. Please specify one object per named health check."); + } + + for (GoogleHealthCheck healthCheck : healthChecksFromDescription) { + String healthCheckName = healthCheck.getName(); + + existingHealthChecks.stream() + .filter(e -> e.getName().equals(healthCheckName)) + .findFirst() + .ifPresent( + existingHealthCheck -> { + healthCheckExistsSet.add(healthCheck.getName()); + if (GCEUtil.healthCheckShouldBeUpdated(existingHealthCheck, healthCheck)) { + healthCheckNeedsUpdatedSet.add(healthCheck.getName()); + } + }); + } + + // BackendServices + if (backendServicesFromDescription.size() + != backendServicesFromDescription.stream() + .map(GoogleBackendService::getName) + .distinct() + .count()) { + throw new GoogleOperationException( + "Duplicate backend services with different attributes in the description. 
Please specify one object per named backend service."); + } + + for (GoogleBackendService backendService : backendServicesFromDescription) { + final String backendServiceName = backendService.getName(); + + existingServices.stream() + .filter(e -> e.getName().equals(backendServiceName)) + .findFirst() + .ifPresent( + existingService -> { + serviceExistsSet.add(backendService.getName()); + + Set existingHcs = + existingService.getHealthChecks() == null + ? new HashSet<>() + : existingService.getHealthChecks().stream() + .map(GCEUtil::getLocalName) + .collect(toSet()); + Boolean differentHealthChecks = + Sets.difference( + existingHcs, + ImmutableSet.of(backendService.getHealthCheck().getName())) + .size() + > 0; + Boolean differentSessionAffinity = + !GoogleSessionAffinity.valueOf(existingService.getSessionAffinity()) + .equals(backendService.getSessionAffinity()); + Boolean differentSessionCookieTtl = + !Objects.equals( + existingService.getAffinityCookieTtlSec(), + backendService.getAffinityCookieTtlSec()); + Boolean differentPortName = + !Objects.equals(existingService.getPortName(), backendService.getPortName()); + Integer drainingSec = + existingService.getConnectionDraining() == null + ? 0 + : existingService.getConnectionDraining().getDrainingTimeoutSec(); + Boolean differentConnectionDraining = + !Objects.equals( + drainingSec, backendService.getConnectionDrainingTimeoutSec()); + if (differentHealthChecks + || differentSessionAffinity + || differentSessionCookieTtl + || differentPortName + || differentConnectionDraining) { + serviceNeedsUpdatedSet.add(backendService.getName()); + } + }); + } + + // Step 1: If there are no existing components in GCE, insert the new L7 components. + // If something exists and needs updated, update it. Else do nothing. 
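+      // Every insert/update below uses the regional Compute APIs (regionHealthChecks, regionBackendServices, regionUrlMaps), and each operation is polled to completion before the next resource is touched.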
+ + // HealthChecks + for (GoogleHealthCheck healthCheck : healthChecksFromDescription) { + String healthCheckName = healthCheck.getName(); + + if (!healthCheckExistsSet.contains(healthCheck.getName())) { + getTask() + .updateStatus( + BASE_PHASE, "Creating health check " + healthCheckName + " in " + region + "..."); + HealthCheck newHealthCheck = GCEUtil.createNewHealthCheck(healthCheck); + Operation insertHealthCheckOperation = + timeExecute( + compute.regionHealthChecks().insert(project, region, newHealthCheck), + "compute.regionHealthChecks.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + insertHealthCheckOperation.getName(), + null, + getTask(), + "region health check " + healthCheckName, + BASE_PHASE); + } else if (healthCheckExistsSet.contains(healthCheck.getName()) + && healthCheckNeedsUpdatedSet.contains(healthCheck.getName())) { + getTask().updateStatus(BASE_PHASE, "Updating health check " + healthCheckName + "..."); + HealthCheck hcToUpdate = + existingHealthChecks.stream() + .filter(hc -> hc.getName().equals(healthCheckName)) + .findFirst() + .get(); + GCEUtil.updateExistingHealthCheck(hcToUpdate, healthCheck); + Operation updateHealthCheckOperation = + timeExecute( + compute.regionHealthChecks().update(project, region, healthCheckName, hcToUpdate), + "compute.regionHealthChecks.update", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + updateHealthCheckOperation.getName(), + null, + getTask(), + "region health check " + healthCheckName, + BASE_PHASE); + } + } + + // BackendServices + for (GoogleBackendService backendService : backendServicesFromDescription) { + String backendServiceName = backendService.getName(); + String sessionAffinity = + backendService.getSessionAffinity() != null + ? backendService.getSessionAffinity().toString() + : "NONE"; + + if (!serviceExistsSet.contains(backendService.getName())) { + getTask() + .updateStatus( + BASE_PHASE, + "Creating backend service " + backendServiceName + " in " + region + "..."); + BackendService service = new BackendService(); + + BackendService bs = service.setName(backendServiceName); + service.setLoadBalancingScheme("INTERNAL_MANAGED"); + service.setPortName( + backendService.getPortName() != null + ? backendService.getPortName() + : GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME); + service.setConnectionDraining( + new ConnectionDraining() + .setDrainingTimeoutSec(backendService.getConnectionDrainingTimeoutSec())); + service.setHealthChecks( + Arrays.asList( + GCEUtil.buildRegionalHealthCheckUrl( + project, region, backendService.getHealthCheck().getName()))); + service.setSessionAffinity(sessionAffinity); + service.setAffinityCookieTtlSec(backendService.getAffinityCookieTtlSec()); + Operation insertBackendServiceOperation = + timeExecute( + compute.regionBackendServices().insert(project, region, bs), + "compute.regionBackendServices.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + insertBackendServiceOperation.getName(), + null, + getTask(), + "region backend service " + backendServiceName, + BASE_PHASE); + } else if (serviceExistsSet.contains(backendService.getName())) { + // Update the actual backend service if necessary. 
+ if (serviceNeedsUpdatedSet.contains(backendService.getName())) { + getTask() + .updateStatus( + BASE_PHASE, + "Updating backend service " + backendServiceName + " in " + region + "..."); + BackendService bsToUpdate = + existingServices.stream() + .filter(s -> s.getName().equals(backendServiceName)) + .findFirst() + .get(); + String hcName = backendService.getHealthCheck().getName(); + bsToUpdate.setPortName( + backendService.getPortName() != null + ? backendService.getPortName() + : GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME); + bsToUpdate.setConnectionDraining( + new ConnectionDraining() + .setDrainingTimeoutSec(backendService.getConnectionDrainingTimeoutSec())); + bsToUpdate.setHealthChecks( + Arrays.asList(GCEUtil.buildRegionalHealthCheckUrl(project, region, hcName))); + bsToUpdate.setSessionAffinity(sessionAffinity); + bsToUpdate.setAffinityCookieTtlSec(backendService.getAffinityCookieTtlSec()); + + Operation updateServiceOperation = + timeExecute( + compute + .regionBackendServices() + .update(project, region, backendServiceName, bsToUpdate), + "compute.regionBackendServices.update", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + updateServiceOperation.getName(), + null, + getTask(), + "region backend service " + backendServiceName, + BASE_PHASE); + } + + fixBackendMetadata( + compute, + description.getCredentials(), + project, + getAtomicOperationsRegistry(), + getOrchestrationProcessor(), + description.getLoadBalancerName(), + backendService); + } + } + if (description.getBackendServiceDiff() != null) { + for (GoogleBackendService backendService : description.getBackendServiceDiff()) { + fixBackendMetadata( + compute, + description.getCredentials(), + project, + getAtomicOperationsRegistry(), + getOrchestrationProcessor(), + description.getLoadBalancerName(), + backendService); + } + } + + // UrlMap + String urlMapUrl = null; + if (!urlMapExists) { + getTask() + .updateStatus(BASE_PHASE, "Creating URL map " + urlMapName + " in " + region + "..."); + UrlMap newUrlMap = new UrlMap(); + newUrlMap.setName(urlMapName); + newUrlMap.setHostRules(new ArrayList<>()); + newUrlMap.setPathMatchers(new ArrayList<>()); + newUrlMap.setDefaultService( + GCEUtil.buildRegionBackendServiceUrl( + project, region, internalHttpLoadBalancer.getDefaultService().getName())); + for (GoogleHostRule hostRule : internalHttpLoadBalancer.getHostRules()) { + String pathMatcherName = PATH_MATCHER_PREFIX + "-" + UUID.randomUUID().toString(); + GooglePathMatcher pathMatcher = hostRule.getPathMatcher(); + PathMatcher matcher = new PathMatcher(); + matcher.setDefaultService( + GCEUtil.buildRegionBackendServiceUrl( + project, region, pathMatcher.getDefaultService().getName())); + matcher.setPathRules( + pathMatcher.getPathRules().stream() + .map( + p -> { + PathRule rule = new PathRule(); + rule.setPaths(p.getPaths()); + rule.setService( + GCEUtil.buildRegionBackendServiceUrl( + project, region, p.getBackendService().getName())); + return rule; + }) + .collect(toList())); + newUrlMap.getPathMatchers().add(matcher); + + HostRule rule = new HostRule(); + rule.setHosts(hostRule.getHostPatterns()); + rule.setPathMatcher(pathMatcherName); + newUrlMap.getHostRules().add(rule); + } + Operation insertUrlMapOperation = + timeExecute( + compute.regionUrlMaps().insert(project, region, newUrlMap), + "compute.regionUrlMaps.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + 
googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + insertUrlMapOperation.getName(), + null, + getTask(), + "region url map " + urlMapName, + BASE_PHASE); + urlMapUrl = insertUrlMapOperation.getTargetLink(); + } else if (urlMapExists) { + getTask() + .updateStatus(BASE_PHASE, "Updating URL map " + urlMapName + " in " + region + "..."); + existingUrlMap.setDefaultService( + GCEUtil.buildRegionBackendServiceUrl( + project, region, internalHttpLoadBalancer.getDefaultService().getName())); + existingUrlMap.setPathMatchers(new ArrayList<>()); + existingUrlMap.setHostRules(new ArrayList<>()); + for (GoogleHostRule hostRule : internalHttpLoadBalancer.getHostRules()) { + String pathMatcherName = PATH_MATCHER_PREFIX + "-" + UUID.randomUUID().toString(); + GooglePathMatcher pathMatcher = hostRule.getPathMatcher(); + PathMatcher matcher = new com.google.api.services.compute.model.PathMatcher(); + matcher.setName(pathMatcherName); + matcher.setDefaultService( + GCEUtil.buildRegionBackendServiceUrl( + project, region, pathMatcher.getDefaultService().getName())); + matcher.setPathRules( + pathMatcher.getPathRules().stream() + .map( + p -> { + PathRule rule = new PathRule(); + rule.setService( + GCEUtil.buildRegionBackendServiceUrl( + project, region, p.getBackendService().getName())); + rule.setPaths(p.getPaths()); + return rule; + }) + .collect(toList())); + existingUrlMap.getPathMatchers().add(matcher); + HostRule rule = new HostRule(); + rule.setHosts(hostRule.getHostPatterns()); + existingUrlMap.getHostRules().add(rule.setPathMatcher(pathMatcherName)); + } + Operation updateUrlMapOperation = + timeExecute( + compute.regionUrlMaps().update(project, region, urlMapName, existingUrlMap), + "compute.regionUrlMaps.update", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + updateUrlMapOperation.getName(), + null, + getTask(), + "region url map " + urlMapName, + BASE_PHASE); + urlMapUrl = updateUrlMapOperation.getTargetLink(); + } else { + urlMapUrl = existingUrlMap.getSelfLink(); + } + + // TargetProxy + String targetProxyName; + Object targetProxy; + Operation insertTargetProxyOperation; + String targetProxyUrl = null; + if (!targetProxyExists) { + if (!StringUtils.isEmpty(internalHttpLoadBalancer.getCertificate())) { + targetProxyName = internalHttpLoadBalancerName + "-" + TARGET_HTTPS_PROXY_NAME_PREFIX; + getTask() + .updateStatus( + BASE_PHASE, "Creating target proxy " + targetProxyName + " in " + region + "..."); + TargetHttpsProxy proxy = new TargetHttpsProxy(); + proxy.setSslCertificates( + Arrays.asList( + GCEUtil.buildCertificateUrl(project, internalHttpLoadBalancer.getCertificate()))); + proxy.setUrlMap(urlMapUrl); + proxy.setName(targetProxyName); + targetProxy = proxy; + insertTargetProxyOperation = + timeExecute( + compute + .regionTargetHttpsProxies() + .insert(project, region, (TargetHttpsProxy) targetProxy), + "compute.regionTargetHttpsProxies.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } else { + targetProxyName = internalHttpLoadBalancerName + "-" + TARGET_HTTP_PROXY_NAME_PREFIX; + getTask() + .updateStatus( + BASE_PHASE, "Creating target proxy " + targetProxyName + " in " + region + "..."); + TargetHttpProxy proxy = new TargetHttpProxy(); + proxy.setName(targetProxyName); + proxy.setUrlMap(urlMapUrl); + targetProxy = proxy; + insertTargetProxyOperation = + timeExecute( + compute + .regionTargetHttpProxies() + .insert(project, region, 
(TargetHttpProxy) targetProxy), + "compute.regionTargetHttpProxies.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } + + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + insertTargetProxyOperation.getName(), + null, + getTask(), + "region target proxy " + targetProxyName, + BASE_PHASE); + targetProxyUrl = insertTargetProxyOperation.getTargetLink(); + } else if (targetProxyExists && targetProxyNeedsUpdated) { + GoogleTargetProxyType proxyType = + Utils.getTargetProxyType((String) existingProxy.get("selfLink")); + switch (proxyType) { + case HTTP: + break; + case HTTPS: + targetProxyName = internalHttpLoadBalancerName + "-" + TARGET_HTTPS_PROXY_NAME_PREFIX; + getTask() + .updateStatus( + BASE_PHASE, + "Updating target proxy " + targetProxyName + " in " + region + "..."); + RegionTargetHttpsProxiesSetSslCertificatesRequest request = + new RegionTargetHttpsProxiesSetSslCertificatesRequest(); + RegionTargetHttpsProxiesSetSslCertificatesRequest setSslReq = + request.setSslCertificates( + Arrays.asList( + GCEUtil.buildRegionalCertificateUrl( + project, region, internalHttpLoadBalancer.getCertificate()))); + Operation sslCertOp = + timeExecute( + compute + .regionTargetHttpsProxies() + .setSslCertificates(project, region, targetProxyName, setSslReq), + "compute.regionTargetHttpsProxies.setSslCertificates", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + sslCertOp.getName(), + null, + getTask(), + "set ssl cert " + internalHttpLoadBalancer.getCertificate(), + BASE_PHASE); + UrlMapReference reference = new UrlMapReference(); + UrlMapReference urlMapRef = reference.setUrlMap(urlMapUrl); + Operation setUrlMapOp = + timeExecute( + compute + .regionTargetHttpsProxies() + .setUrlMap(project, region, targetProxyName, urlMapRef), + "compute.regionTargetHttpsProxies.setUrlMap", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + setUrlMapOp.getName(), + null, + getTask(), + "set urlMap " + urlMapUrl + " for target proxy " + targetProxyName, + BASE_PHASE); + targetProxyUrl = setUrlMapOp.getTargetLink(); + break; + default: + throw new IllegalStateException( + "Updating Internal Http load balancer " + + internalHttpLoadBalancerName + + " in " + + region + + " failed. Could not update target proxy; Illegal target proxy type " + + proxyType + + "."); + } + } else { + targetProxyUrl = (String) existingProxy.get("selfLink"); + } + + // ForwardingRule + if (!forwardingRuleExists) { + getTask() + .updateStatus( + BASE_PHASE, + "Creating internal forwarding rule " + + internalHttpLoadBalancerName + + " in " + + region + + "..."); + ForwardingRule rule = new ForwardingRule(); + + rule.setName(internalHttpLoadBalancerName); + rule.setLoadBalancingScheme("INTERNAL_MANAGED"); + rule.setIPAddress(internalHttpLoadBalancer.getIpAddress()); + rule.setIPProtocol(internalHttpLoadBalancer.getIpProtocol()); + rule.setNetwork(internalHttpLoadBalancer.getNetwork()); + rule.setSubnetwork(internalHttpLoadBalancer.getSubnet()); + rule.setPortRange( + StringGroovyMethods.asBoolean(internalHttpLoadBalancer.getCertificate()) + ? 
"443" + : internalHttpLoadBalancer.getPortRange()); + rule.setTarget(targetProxyUrl); + + Operation forwardingRuleOp = + safeRetry.doRetry( + new Closure(this, this) { + @Override + public Operation call() { + try { + return timeExecute( + compute.forwardingRules().insert(project, region, rule), + "compute.forwardingRules.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }, + "forwarding rule " + description.getLoadBalancerName(), + getTask(), + Arrays.asList(400, 403, 412), + new ArrayList<>(), + ImmutableMap.of( + "action", + "insert", + "phase", + BASE_PHASE, + "operation", + "compute.forwardingRules.insert", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + region), + getRegistry()); + + // Orca's orchestration for upserting a Google load balancer does not contain a task + // to wait for the state of the platform to show that a load balancer was created (for good + // reason, + // that would be a complicated operation). Instead, Orca waits for Clouddriver to execute + // this operation + // and do a force cache refresh. We should wait for the whole load balancer to be created in + // the platform + // before we exit this upsert operation, so we wait for the forwarding rule to be created + // before continuing + // so we _know_ the state of the platform when we do a force cache refresh. + googleOperationPoller.waitForRegionalOperation( + compute, + project, + region, + forwardingRuleOp.getName(), + null, + getTask(), + "forwarding rule " + internalHttpLoadBalancerName, + BASE_PHASE); + } + + // NOTE: there is no update for forwarding rules because we support adding/deleting multiple + // listeners in the frontend. + // Rotating or changing certificates updates the targetProxy only, so the forwarding rule + // doesn't need to change. + + // Delete extraneous listeners. + if (description.getListenersToDelete() != null) { + for (String forwardingRuleName : description.getListenersToDelete()) { + getTask() + .updateStatus( + BASE_PHASE, "Deleting listener " + forwardingRuleName + " in " + region + "..."); + GCEUtil.deleteRegionalListener( + compute, + project, + region, + forwardingRuleName, + BASE_PHASE, + getSafeRetry(), + UpsertGoogleInternalHttpLoadBalancerAtomicOperation.this); + } + } + getTask() + .updateStatus( + BASE_PHASE, + "Done upserting Internal HTTP load balancer " + + internalHttpLoadBalancerName + + " in " + + region); + + Map lb = new HashMap<>(1); + lb.put("name", internalHttpLoadBalancerName); + Map> regionToLb = new HashMap<>(1); + regionToLb.put("region", lb); + + Map>> lbs = new HashMap<>(1); + lbs.put("loadBalancers", regionToLb); + return lbs; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + /** + * Update each instance template on all the server groups in the backend service to reflect being + * added to the new load balancer. 
+ * + * @param compute + * @param credentials + * @param project + * @param loadBalancerName + * @param backendService + */ + private void fixBackendMetadata( + Compute compute, + GoogleNamedAccountCredentials credentials, + String project, + AtomicOperationsRegistry atomicOperationsRegistry, + OrchestrationProcessor orchestrationProcessor, + String loadBalancerName, + GoogleBackendService backendService) { + if (backendService.getBackends() == null) return; + try { + for (GoogleLoadBalancedBackend backend : backendService.getBackends()) { + + String groupName = Utils.getLocalName(backend.getServerGroupUrl()); + String groupRegion = Utils.getRegionFromGroupUrl(backend.getServerGroupUrl()); + String templateUrl = null; + switch (Utils.determineServerGroupType(backend.getServerGroupUrl())) { + case REGIONAL: + templateUrl = + timeExecute( + compute.regionInstanceGroupManagers().get(project, groupRegion, groupName), + "compute.regionInstanceGroupManagers.get", + TAG_SCOPE, + SCOPE_REGIONAL, + TAG_REGION, + groupRegion) + .getInstanceTemplate(); + break; + case ZONAL: + String groupZone = Utils.getZoneFromGroupUrl(backend.getServerGroupUrl()); + templateUrl = + timeExecute( + compute.instanceGroupManagers().get(project, groupZone, groupName), + "compute.instanceGroupManagers.get", + TAG_SCOPE, + SCOPE_ZONAL, + TAG_ZONE, + groupZone) + .getInstanceTemplate(); + break; + default: + throw new IllegalStateException( + "Server group referenced by " + backend.getServerGroupUrl() + " has illegal type."); + } + + InstanceTemplate template = + timeExecute( + compute.instanceTemplates().get(project, Utils.getLocalName(templateUrl)), + "compute.instancesTemplates.get", + TAG_SCOPE, + SCOPE_GLOBAL); + BaseGoogleInstanceDescription instanceDescription = + GCEUtil.buildInstanceDescriptionFromTemplate(project, template); + + Map templateOpMap = new HashMap<>(15); + templateOpMap.put("image", instanceDescription.getImage()); + templateOpMap.put("instanceType", instanceDescription.getInstanceType()); + templateOpMap.put("credentials", credentials.getName()); + templateOpMap.put("disks", instanceDescription.getDisks()); + templateOpMap.put("instanceMetadata", instanceDescription.getInstanceMetadata()); + templateOpMap.put("tags", instanceDescription.getTags()); + templateOpMap.put("network", instanceDescription.getNetwork()); + templateOpMap.put("subnet", instanceDescription.getSubnet()); + templateOpMap.put("serviceAccountEmail", instanceDescription.getServiceAccountEmail()); + templateOpMap.put("authScopes", instanceDescription.getAuthScopes()); + templateOpMap.put("preemptible", instanceDescription.getPreemptible()); + templateOpMap.put("automaticRestart", instanceDescription.getAutomaticRestart()); + templateOpMap.put("onHostMaintenance", instanceDescription.getOnHostMaintenance()); + templateOpMap.put("region", groupRegion); + templateOpMap.put("serverGroupName", groupName); + + if (StringGroovyMethods.asBoolean(instanceDescription.getMinCpuPlatform())) { + templateOpMap.put("minCpuPlatform", instanceDescription.getMinCpuPlatform()); + } + + if (templateOpMap.containsKey("instanceMetadata")) { + Map instanceMetadata = (Map) templateOpMap.get("instanceMetadata"); + String regionLbStr = instanceMetadata.get(REGIONAL_LOAD_BALANCER_NAMES); + List regionalLbs = + regionLbStr != null + ? 
new ArrayList<>(Arrays.asList(regionLbStr.split(","))) + : new ArrayList<>(); + regionalLbs.add(loadBalancerName); + instanceMetadata.put( + REGIONAL_LOAD_BALANCER_NAMES, + regionalLbs.stream().distinct().collect(Collectors.joining(","))); + + String backendsStr = instanceMetadata.get(REGION_BACKEND_SERVICE_NAMES); + List bsNames = + backendsStr != null + ? new ArrayList<>(Arrays.asList(backendsStr.split(","))) + : new ArrayList<>(); + bsNames.add(backendService.getName()); + instanceMetadata.put( + REGION_BACKEND_SERVICE_NAMES, + bsNames.stream().distinct().collect(Collectors.joining(","))); + } else { + Map instanceMetadata = new HashMap<>(2); + instanceMetadata.put(REGIONAL_LOAD_BALANCER_NAMES, loadBalancerName); + instanceMetadata.put(REGION_BACKEND_SERVICE_NAMES, backendService.getName()); + templateOpMap.put("instanceMetadata", instanceMetadata); + } + + AtomicOperationConverter converter = + atomicOperationsRegistry.getAtomicOperationConverter( + "modifyGoogleServerGroupInstanceTemplateDescription", "gce"); + AtomicOperation templateOp = converter.convertOperation(templateOpMap); + orchestrationProcessor.process( + new ArrayList<>(Arrays.asList(templateOp)), UUID.randomUUID().toString()); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public AtomicOperationsRegistry getAtomicOperationsRegistry() { + return atomicOperationsRegistry; + } + + public void setAtomicOperationsRegistry(AtomicOperationsRegistry atomicOperationsRegistry) { + this.atomicOperationsRegistry = atomicOperationsRegistry; + } + + public OrchestrationProcessor getOrchestrationProcessor() { + return orchestrationProcessor; + } + + public void setOrchestrationProcessor(OrchestrationProcessor orchestrationProcessor) { + this.orchestrationProcessor = orchestrationProcessor; + } + + public SafeRetry getSafeRetry() { + return safeRetry; + } + + public void setSafeRetry(SafeRetry safeRetry) { + this.safeRetry = safeRetry; + } +} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleLoadBalancerDescriptionValidator.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleLoadBalancerDescriptionValidator.groovy index 1a4f631bfad..664ff926e9d 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleLoadBalancerDescriptionValidator.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/deploy/validators/UpsertGoogleLoadBalancerDescriptionValidator.groovy @@ -23,6 +23,7 @@ import com.netflix.spinnaker.clouddriver.google.deploy.description.UpsertGoogleL import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleBackendService import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancer +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalHttpLoadBalancer import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerType import com.netflix.spinnaker.clouddriver.orchestration.AtomicOperations import com.netflix.spinnaker.clouddriver.security.AccountCredentialsProvider @@ -86,6 +87,34 @@ class UpsertGoogleLoadBalancerDescriptionValidator extends } } break + case GoogleLoadBalancerType.INTERNAL_MANAGED: + + // portRange must be a single port. 
+ try { + Integer.parseInt(description.portRange) + } catch (NumberFormatException _) { + errors.rejectValue("portRange", + "upsertGoogleLoadBalancerDescription.portRange.requireSinglePort") + } + + // Each backend service must have a health check. + def googleInternalHttpLoadBalancer = new GoogleInternalHttpLoadBalancer( + name: description.loadBalancerName, + defaultService: description.defaultService, + hostRules: description.hostRules, + certificate: description.certificate, + ipAddress: description.ipAddress, + ipProtocol: description.ipProtocol, + portRange: description.portRange + ) + List services = Utils.getBackendServicesFromInternalHttpLoadBalancerView(googleInternalHttpLoadBalancer.view) + services?.each { GoogleBackendService service -> + if (!service.healthCheck) { + errors.rejectValue("defaultService OR hostRules.pathMatcher.defaultService OR hostRules.pathMatcher.pathRules.backendService", + "upsertGoogleLoadBalancerDescription.backendServices.healthCheckRequired") + } + } + break case GoogleLoadBalancerType.INTERNAL: helper.validateRegion(description.region, description.credentials) diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleHealthCheck.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleHealthCheck.groovy index e56773d3e1b..f0538d1f244 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleHealthCheck.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleHealthCheck.groovy @@ -48,6 +48,8 @@ class GoogleHealthCheck { */ HealthCheckKind kind + String region + /** * Name of the GCP certificate, if HTTPS/SSL. */ @@ -107,6 +109,7 @@ class GoogleHealthCheck { String selfLink = GoogleHealthCheck.this.selfLink String kind = GoogleHealthCheck.this.kind String target = GoogleHealthCheck.this.target + String region = GoogleHealthCheck.this.region } static enum HealthCheckType { diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSubnet.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSubnet.groovy index 561938a7cde..929828dd53f 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSubnet.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/GoogleSubnet.groovy @@ -30,5 +30,5 @@ class GoogleSubnet implements Subnet { String account String region String selfLink - String purpose = "n/a" + String purpose } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/callbacks/Utils.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/callbacks/Utils.groovy index 48e0e67a4b4..f959fa2c872 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/callbacks/Utils.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/callbacks/Utils.groovy @@ -25,6 +25,8 @@ import com.netflix.spinnaker.clouddriver.google.model.GoogleServerGroup import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleBackendService import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHostRule import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancer +import 
com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalHttpLoadBalancer +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalHttpLoadBalancer.InternalHttpLbView; import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalLoadBalancer import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancedBackend import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GooglePathMatcher @@ -38,6 +40,7 @@ import org.springframework.util.ClassUtils import java.text.SimpleDateFormat import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.BACKEND_SERVICE_NAMES +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGION_BACKEND_SERVICE_NAMES import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.GLOBAL_LOAD_BALANCER_NAMES import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGIONAL_LOAD_BALANCER_NAMES @@ -256,14 +259,24 @@ class Utils { static List getBackendServicesFromHttpLoadBalancerView(GoogleHttpLoadBalancer.View googleLoadBalancer) { List backendServices = [googleLoadBalancer.defaultService] - List pathMatchers = googleLoadBalancer?.hostRules?.collect { GoogleHostRule hostRule -> hostRule.pathMatcher } + collectBackendServicesFromHostRules(googleLoadBalancer?.hostRules, backendServices) + return backendServices; + } + + static List getBackendServicesFromInternalHttpLoadBalancerView(InternalHttpLbView googleLoadBalancer) { + List backendServices = [googleLoadBalancer.defaultService] + collectBackendServicesFromHostRules(googleLoadBalancer?.hostRules, backendServices) + return backendServices + } + + static void collectBackendServicesFromHostRules(List hostRules, List backendServices) { + List pathMatchers = hostRules.collect { GoogleHostRule hostRule -> hostRule.pathMatcher } pathMatchers?.each { GooglePathMatcher pathMatcher -> backendServices << pathMatcher.defaultService pathMatcher?.pathRules?.each { GooglePathRule googlePathRule -> backendServices << googlePathRule.backendService } - } - return backendServices + }?.findAll { it != null } } static List getBackendServicesFromUrlMap(UrlMap urlMap) { @@ -291,6 +304,20 @@ class Utils { return loadBalancer.name in httpLoadBalancersFromMetadata && !(serverGroup.name in backendGroupNames) } + static boolean determineInternalHttpLoadBalancerDisabledState(GoogleInternalHttpLoadBalancer loadBalancer, + GoogleServerGroup serverGroup) { + def loadBalancersFromMetadata = serverGroup.asg.get(REGIONAL_LOAD_BALANCER_NAMES) + def backendServicesFromMetadata = serverGroup.asg.get(REGION_BACKEND_SERVICE_NAMES) + List> serviceBackends = getBackendServicesFromInternalHttpLoadBalancerView(loadBalancer.view) + .findAll { it && it.name in backendServicesFromMetadata } + .collect { it.backends } + List backendGroupNames = serviceBackends.flatten() + .findAll { serverGroup.region == Utils.getRegionFromGroupUrl(it.serverGroupUrl) } + .collect { GCEUtil.getLocalName(it.serverGroupUrl) } + + return loadBalancer.name in loadBalancersFromMetadata && !(serverGroup.name in backendGroupNames) + } + static String decorateXpnResourceIdIfNeeded(String managedProjectId, String xpnResource) { if (!xpnResource) { return xpnResource diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleHttpLoadBalancingPolicy.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleHttpLoadBalancingPolicy.groovy index 
6d9e3e34079..5b17ea448b9 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleHttpLoadBalancingPolicy.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleHttpLoadBalancingPolicy.groovy @@ -30,7 +30,7 @@ import com.google.api.services.compute.model.NamedPort @JsonIgnoreProperties(ignoreUnknown = true) class GoogleHttpLoadBalancingPolicy extends GoogleLoadBalancingPolicy { @JsonIgnore - static final String HTTP_DEFAULT_PORT_NAME = 'http' + public static final String HTTP_DEFAULT_PORT_NAME = 'http' @JsonIgnore static final Integer HTTP_DEFAULT_PORT = 80 diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleInternalHttpLoadBalancer.java b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleInternalHttpLoadBalancer.java new file mode 100644 index 00000000000..70be27ad106 --- /dev/null +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleInternalHttpLoadBalancer.java @@ -0,0 +1,79 @@ +/* + * Copyright 2016 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.model.loadbalancing; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.List; +import lombok.Data; +import lombok.EqualsAndHashCode; +import lombok.ToString; +import lombok.Value; + +@Data +@EqualsAndHashCode(callSuper = true) +@ToString(callSuper = true) +public class GoogleInternalHttpLoadBalancer extends GoogleLoadBalancer { + final GoogleLoadBalancerType type = GoogleLoadBalancerType.INTERNAL_MANAGED; + final GoogleLoadBalancingScheme loadBalancingScheme = GoogleLoadBalancingScheme.INTERNAL_MANAGED; + + /** Default backend service a request is sent to if no host rules are matched. */ + GoogleBackendService defaultService; + + /** List of host rules that map incoming requests to GooglePathMatchers based on host header. */ + List hostRules; + + /** SSL certificate. This is populated only if this load balancer is a HTTPS load balancer. */ + String certificate; + + /** + * The name of the UrlMap this load balancer uses to route traffic. In the Google Cloud Console, + * the L7 load balancer name is the same as this name. 
+ */ + String urlMapName; + + String network; + String subnet; + + @JsonIgnore + public InternalHttpLbView getView() { + return new InternalHttpLbView(); + } + + @Value + @EqualsAndHashCode(callSuper = true) + @ToString(callSuper = true) + public class InternalHttpLbView extends GoogleLoadBalancerView { + GoogleLoadBalancerType loadBalancerType = GoogleInternalHttpLoadBalancer.this.type; + GoogleLoadBalancingScheme loadBalancingScheme = + GoogleInternalHttpLoadBalancer.this.loadBalancingScheme; + + String name = GoogleInternalHttpLoadBalancer.this.getName(); + String account = GoogleInternalHttpLoadBalancer.this.getAccount(); + String region = GoogleInternalHttpLoadBalancer.this.getRegion(); + Long createdTime = GoogleInternalHttpLoadBalancer.this.getCreatedTime(); + String ipAddress = GoogleInternalHttpLoadBalancer.this.getIpAddress(); + String ipProtocol = GoogleInternalHttpLoadBalancer.this.getIpProtocol(); + String portRange = GoogleInternalHttpLoadBalancer.this.getPortRange(); + + GoogleBackendService defaultService = GoogleInternalHttpLoadBalancer.this.defaultService; + List hostRules = GoogleInternalHttpLoadBalancer.this.hostRules; + String certificate = GoogleInternalHttpLoadBalancer.this.certificate; + String urlMapName = GoogleInternalHttpLoadBalancer.this.urlMapName; + String network = GoogleInternalHttpLoadBalancer.this.network; + String subnet = GoogleInternalHttpLoadBalancer.this.subnet; + } +} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancerType.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancerType.groovy index ea3298bb6e0..d3d2e85c44a 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancerType.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancerType.groovy @@ -19,6 +19,7 @@ package com.netflix.spinnaker.clouddriver.google.model.loadbalancing enum GoogleLoadBalancerType { HTTP, INTERNAL, + INTERNAL_MANAGED, NETWORK, SSL, TCP diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancingScheme.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancingScheme.groovy index ce466ee371e..92c09e927c2 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancingScheme.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleLoadBalancingScheme.groovy @@ -18,4 +18,5 @@ package com.netflix.spinnaker.clouddriver.google.model.loadbalancing enum GoogleLoadBalancingScheme { EXTERNAL, INTERNAL, + INTERNAL_MANAGED, } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleSessionAffinity.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleSessionAffinity.groovy index 19223fb9971..6884fe767af 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleSessionAffinity.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/model/loadbalancing/GoogleSessionAffinity.groovy @@ -22,4 +22,6 @@ enum GoogleSessionAffinity { CLIENT_IP_PORT_PROTO, 
CLIENT_IP_PROTO, GENERATED_COOKIE, + HEADER_FIELD, + HTTP_COOKIE } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleServerGroupCachingAgent.java b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleServerGroupCachingAgent.java index 14b174c876b..3c4bcc0cf0f 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleServerGroupCachingAgent.java +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/AbstractGoogleServerGroupCachingAgent.java @@ -32,6 +32,7 @@ import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.GLOBAL_LOAD_BALANCER_NAMES; import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.LOAD_BALANCING_POLICY; import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGIONAL_LOAD_BALANCER_NAMES; +import static com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil.REGION_BACKEND_SERVICE_NAMES; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; @@ -869,6 +870,12 @@ private void setAutoscalerGroup( BACKEND_SERVICE_NAMES, COMMA.splitToList(metadata.get(BACKEND_SERVICE_NAMES))); } + if (metadata.containsKey(REGION_BACKEND_SERVICE_NAMES)) { + autoscalerGroup.put( + REGION_BACKEND_SERVICE_NAMES, + COMMA.splitToList(metadata.get(REGION_BACKEND_SERVICE_NAMES))); + } + if (metadata.containsKey(LOAD_BALANCING_POLICY)) { try { autoscalerGroup.put( diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHealthCheckCachingAgent.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHealthCheckCachingAgent.groovy index cbc94afa23b..d13199d963c 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHealthCheckCachingAgent.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleHealthCheckCachingAgent.groovy @@ -90,6 +90,7 @@ class GoogleHealthCheckCachingAgent extends AbstractGoogleCachingAgent { ret << new GoogleHealthCheck( name: hc.getName(), selfLink: hc.getSelfLink(), + region: "global", healthCheckType: GoogleHealthCheck.HealthCheckType.HTTP, kind: GoogleHealthCheck.HealthCheckKind.httpHealthCheck, port: hc.getPort(), @@ -120,6 +121,7 @@ class GoogleHealthCheckCachingAgent extends AbstractGoogleCachingAgent { ret << new GoogleHealthCheck( name: hc.getName(), selfLink: hc.getSelfLink(), + region: "global", healthCheckType: GoogleHealthCheck.HealthCheckType.HTTPS, kind: GoogleHealthCheck.HealthCheckKind.httpsHealthCheck, port: hc.getPort(), @@ -146,53 +148,75 @@ class GoogleHealthCheckCachingAgent extends AbstractGoogleCachingAgent { { HealthCheckList list -> list.getItems() }, "compute.healthChecks.list", TAG_SCOPE, SCOPE_GLOBAL ) - healthChecks.each { HealthCheck hc -> - def newHC = new GoogleHealthCheck( - name: hc.getName(), - selfLink: hc.getSelfLink(), - kind: GoogleHealthCheck.HealthCheckKind.healthCheck, - checkIntervalSec: hc.getCheckIntervalSec(), - timeoutSec: hc.getTimeoutSec(), - healthyThreshold: hc.getHealthyThreshold(), - unhealthyThreshold: hc.getUnhealthyThreshold() - ) - - // Health checks of kind 'healthCheck' are all nested -- the actual health check is contained - // in a field inside a wrapper HealthCheck object. 
The wrapper object specifies the type of nested - // health check as a string, and the proper field is populated based on the type. - switch(hc.getType()) { - case 'HTTP': - newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.HTTP - newHC.port = hc.getHttpHealthCheck().getPort() - newHC.requestPath = hc.getHttpHealthCheck().getRequestPath() - break - case 'HTTPS': - newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.HTTPS - newHC.port = hc.getHttpsHealthCheck().getPort() - newHC.requestPath = hc.getHttpsHealthCheck().getRequestPath() - break - case 'TCP': - newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.TCP - newHC.port = hc.getTcpHealthCheck().getPort() - break - case 'SSL': - newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.SSL - newHC.port = hc.getSslHealthCheck().getPort() - break - case 'UDP': - newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.UDP - newHC.port = hc.getUdpHealthCheck().getPort() - break - default: - log.warn("Health check ${hc.getName()} has unknown type ${hc.getType()}.") - return - break + ret.addAll(healthChecks.collect { toGoogleHealthCheck(it, "global") }) + def cachingAgent = this + credentials.regions.collect { it.name }.each { String region -> + List regionHealthChecks = new PaginatedRequest(cachingAgent) { + @Override + protected ComputeRequest request (String pageToken) { + return compute.regionHealthChecks().list(project, region).setPageToken(pageToken) + } + + @Override + String getNextPageToken(HealthCheckList t) { + return t.getNextPageToken(); + } } - ret << newHC + .timeExecute( + { HealthCheckList list -> list.getItems() }, + "compute.regionHealthChecks.list", TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region + ) + ret.addAll(regionHealthChecks.collect { toGoogleHealthCheck(it, region) }) } ret } + private static GoogleHealthCheck toGoogleHealthCheck(HealthCheck hc, String region) { + def newHC = new GoogleHealthCheck( + name: hc.getName(), + selfLink: hc.getSelfLink(), + region: region, + kind: GoogleHealthCheck.HealthCheckKind.healthCheck, + checkIntervalSec: hc.getCheckIntervalSec(), + timeoutSec: hc.getTimeoutSec(), + healthyThreshold: hc.getHealthyThreshold(), + unhealthyThreshold: hc.getUnhealthyThreshold() + ) + + // Health checks of kind 'healthCheck' are all nested -- the actual health check is contained + // in a field inside a wrapper HealthCheck object. The wrapper object specifies the type of nested + // health check as a string, and the proper field is populated based on the type. 
+ switch(hc.getType()) { + case 'HTTP': + newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.HTTP + newHC.port = hc.getHttpHealthCheck().getPort() + newHC.requestPath = hc.getHttpHealthCheck().getRequestPath() + break + case 'HTTPS': + newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.HTTPS + newHC.port = hc.getHttpsHealthCheck().getPort() + newHC.requestPath = hc.getHttpsHealthCheck().getRequestPath() + break + case 'TCP': + newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.TCP + newHC.port = hc.getTcpHealthCheck().getPort() + break + case 'SSL': + newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.SSL + newHC.port = hc.getSslHealthCheck().getPort() + break + case 'UDP': + newHC.healthCheckType = GoogleHealthCheck.HealthCheckType.UDP + newHC.port = hc.getUdpHealthCheck().getPort() + break + default: + log.warn("Health check ${hc.getName()} has unknown type ${hc.getType()}.") + return + break + } + return newHC + } + private CacheResult buildCacheResult(ProviderCache _, List healthCheckList) { log.debug("Describing items in ${agentType}") diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInternalHttpLoadBalancerCachingAgent.java b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInternalHttpLoadBalancerCachingAgent.java new file mode 100644 index 00000000000..e7c509b1c16 --- /dev/null +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/agent/GoogleInternalHttpLoadBalancerCachingAgent.java @@ -0,0 +1,714 @@ +package com.netflix.spinnaker.clouddriver.google.provider.agent; + +import static com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleTargetProxyType.HTTP; +import static com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleTargetProxyType.HTTPS; +import static java.util.stream.Collectors.toList; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.api.client.googleapis.batch.json.JsonBatchCallback; +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.client.http.HttpHeaders; +import com.google.api.services.compute.ComputeRequest; +import com.google.api.services.compute.model.*; +import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.clouddriver.google.batch.GoogleBatchRequest; +import com.netflix.spinnaker.clouddriver.google.cache.Keys; +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil; +import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck; +import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils; +import com.netflix.spinnaker.clouddriver.google.model.health.GoogleLoadBalancerHealth; +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.*; +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.GroupHealthRequest; +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.LoadBalancerHealthResolution; +import com.netflix.spinnaker.clouddriver.google.provider.agent.util.PaginatedRequest; +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class GoogleInternalHttpLoadBalancerCachingAgent + extends AbstractGoogleLoadBalancerCachingAgent { + private static final Logger log = LoggerFactory.getLogger(GoogleInternalHttpLoadBalancer.class); + + 
public GoogleInternalHttpLoadBalancerCachingAgent( + String clouddriverUserAgentApplicationName, + GoogleNamedAccountCredentials credentials, + ObjectMapper objectMapper, + Registry registry, + String region) { + super(clouddriverUserAgentApplicationName, credentials, objectMapper, registry, region); + } + + @Override + public List constructLoadBalancers(String onDemandLoadBalancerName) { + List loadBalancers = new ArrayList<>(); + List failedLoadBalancers = new ArrayList<>(); + + GoogleBatchRequest forwardingRulesRequest = buildGoogleBatchRequest(); + GoogleBatchRequest targetProxyRequest = buildGoogleBatchRequest(); + GoogleBatchRequest urlMapRequest = buildGoogleBatchRequest(); + GoogleBatchRequest groupHealthRequest = buildGoogleBatchRequest(); + + // Reset the local getHealth caches/queues each caching agent cycle. + bsNameToGroupHealthsMap = new HashMap<>(); + queuedBsGroupHealthRequests = new HashSet<>(); + resolutions = new HashSet<>(); + + List projectBackendServices = + GCEUtil.fetchRegionBackendServices(this, getCompute(), getProject(), getRegion()); + List projectHealthChecks = + GCEUtil.fetchRegionalHealthChecks(this, getCompute(), getProject(), getRegion()); + + ForwardingRuleCallbacks forwardingRuleCallbacks = + new ForwardingRuleCallbacks( + loadBalancers, + failedLoadBalancers, + targetProxyRequest, + urlMapRequest, + groupHealthRequest, + projectBackendServices, + projectHealthChecks); + + try { + if (onDemandLoadBalancerName != null) { + ForwardingRuleCallbacks.ForwardingRuleSingletonCallback frCallback = + forwardingRuleCallbacks.newForwardingRuleSingletonCallback(); + forwardingRulesRequest.queue( + getCompute().forwardingRules().get(getProject(), getRegion(), onDemandLoadBalancerName), + frCallback); + } else { + ForwardingRuleCallbacks.ForwardingRuleListCallback frlCallback = + forwardingRuleCallbacks.newForwardingRuleListCallback(); + new PaginatedRequest(this) { + @Override + public ComputeRequest request(String pageToken) { + try { + return getCompute() + .forwardingRules() + .list(getProject(), getRegion()) + .setPageToken(pageToken); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + @Override + public String getNextPageToken(ForwardingRuleList forwardingRuleList) { + return forwardingRuleList.getNextPageToken(); + } + }.queue( + forwardingRulesRequest, frlCallback, "InternalHttpLoadBalancerCaching.forwardingRules"); + } + + executeIfRequestsAreQueued( + forwardingRulesRequest, "InternalHttpLoadBalancerCaching.forwardingRules"); + executeIfRequestsAreQueued(targetProxyRequest, "InternalHttpLoadBalancerCaching.targetProxy"); + executeIfRequestsAreQueued(urlMapRequest, "InternalHttpLoadBalancerCaching.urlMapRequest"); + executeIfRequestsAreQueued(groupHealthRequest, "InternalHttpLoadBalancerCaching.groupHealth"); + + for (LoadBalancerHealthResolution resolution : resolutions) { + for (Object groupHealth : bsNameToGroupHealthsMap.get(resolution.getTarget())) { + GCEUtil.handleHealthObject(resolution.getGoogleLoadBalancer(), groupHealth); + } + } + return loadBalancers.stream() + .filter(lb -> !failedLoadBalancers.contains(lb.getName())) + .collect(toList()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + @Override + public List constructLoadBalancers() { + return constructLoadBalancers(null); + } + + @Override + public String determineInstanceKey( + GoogleLoadBalancer loadBalancer, GoogleLoadBalancerHealth health) { + // Http load balancers' region is "global", so we have to determine the instance region from 
its + // zone. + String instanceZone = health.getInstanceZone(); + String instanceRegion = getCredentials().regionFromZone(instanceZone); + + return Keys.getInstanceKey(getAccountName(), instanceRegion, health.getInstanceName()); + } + + /** + * Local cache of BackendServiceGroupHealth keyed by BackendService name. + * + *
<p>
It turns out that the types in the GCE Batch callbacks aren't the actual Compute types for + * some reason, which is why this map is String -> Object. + */ + private Map> bsNameToGroupHealthsMap = new HashMap<>(); + + private Set queuedBsGroupHealthRequests = new HashSet(); + private Set resolutions = + new HashSet(); + + public class ForwardingRuleCallbacks { + public ForwardingRuleSingletonCallback newForwardingRuleSingletonCallback() { + return new ForwardingRuleSingletonCallback(); + } + + public ForwardingRuleListCallback newForwardingRuleListCallback() { + return new ForwardingRuleListCallback(); + } + + public void cacheRemainderOfLoadBalancerResourceGraph(final ForwardingRule forwardingRule) { + GoogleInternalHttpLoadBalancer newLoadBalancer = new GoogleInternalHttpLoadBalancer(); + + newLoadBalancer.setName(forwardingRule.getName()); + newLoadBalancer.setAccount(getAccountName()); + newLoadBalancer.setRegion(Utils.getLocalName(forwardingRule.getRegion())); + newLoadBalancer.setCreatedTime( + Utils.getTimeFromTimestamp(forwardingRule.getCreationTimestamp())); + newLoadBalancer.setIpAddress(forwardingRule.getIPAddress()); + newLoadBalancer.setIpProtocol(forwardingRule.getIPProtocol()); + newLoadBalancer.setPortRange(forwardingRule.getPortRange()); + newLoadBalancer.setNetwork(forwardingRule.getNetwork()); + newLoadBalancer.setSubnet(forwardingRule.getSubnetwork()); + newLoadBalancer.setHealths(new ArrayList<>()); + newLoadBalancer.setHostRules(new ArrayList<>()); + loadBalancers.add(newLoadBalancer); + + String targetProxyName = Utils.getLocalName(forwardingRule.getTarget()); + TargetProxyCallback targetProxyCallback = + new TargetProxyCallback( + newLoadBalancer, + urlMapRequest, + groupHealthRequest, + projectBackendServices, + projectHealthChecks, + newLoadBalancer.getName(), + failedLoadBalancers); + + TargetHttpsProxyCallback targetHttpsProxyCallback = + new TargetHttpsProxyCallback( + newLoadBalancer, + urlMapRequest, + groupHealthRequest, + projectBackendServices, + projectHealthChecks, + newLoadBalancer.getName(), + failedLoadBalancers); + + try { + switch (Utils.getTargetProxyType(forwardingRule.getTarget())) { + case HTTP: + targetProxyRequest.queue( + getCompute() + .regionTargetHttpProxies() + .get(getProject(), getRegion(), targetProxyName), + targetProxyCallback); + break; + case HTTPS: + targetProxyRequest.queue( + getCompute() + .regionTargetHttpsProxies() + .get(getProject(), getRegion(), targetProxyName), + targetHttpsProxyCallback); + break; + default: + log.debug( + "Non-Http target type found for global forwarding rule " + + forwardingRule.getName()); + break; + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public List getLoadBalancers() { + return loadBalancers; + } + + public void setLoadBalancers(List loadBalancers) { + this.loadBalancers = loadBalancers; + } + + private List loadBalancers; + private List failedLoadBalancers; + private GoogleBatchRequest targetProxyRequest; + private GoogleBatchRequest urlMapRequest; + private GoogleBatchRequest groupHealthRequest; + private List projectBackendServices; + private List projectHealthChecks; + + public ForwardingRuleCallbacks( + List loadBalancers, + List failedLoadBalancers, + GoogleBatchRequest targetProxyRequest, + GoogleBatchRequest urlMapRequest, + GoogleBatchRequest groupHealthRequest, + List projectBackendServices, + List projectHealthChecks) { + this.loadBalancers = loadBalancers; + this.failedLoadBalancers = failedLoadBalancers; + this.targetProxyRequest = 
targetProxyRequest; + this.urlMapRequest = urlMapRequest; + this.groupHealthRequest = groupHealthRequest; + this.projectBackendServices = projectBackendServices; + this.projectHealthChecks = projectHealthChecks; + } + + public class ForwardingRuleSingletonCallback extends JsonBatchCallback { + @Override + public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + // 404 is thrown if the forwarding rule does not exist in the given region. Any other + // exception needs to be propagated. + if (e.getCode() != 404) { + String errorJson = + new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(e); + log.error(errorJson); + } + } + + @Override + public void onSuccess(ForwardingRule forwardingRule, HttpHeaders responseHeaders) + throws IOException { + GoogleTargetProxyType type = + forwardingRule.getTarget() != null + ? Utils.getTargetProxyType(forwardingRule.getTarget()) + : null; + if (type == HTTP || type == HTTPS) { + cacheRemainderOfLoadBalancerResourceGraph(forwardingRule); + } else { + throw new IllegalArgumentException( + "Not responsible for on demand caching of load balancers without target " + + "proxy or with SSL proxy type."); + } + } + } + + public class ForwardingRuleListCallback extends JsonBatchCallback + implements FailureLogger { + @Override + public void onSuccess(ForwardingRuleList forwardingRuleList, HttpHeaders responseHeaders) { + if (forwardingRuleList.getItems() == null) return; + forwardingRuleList.getItems().stream() + .filter( + f -> + f.getLoadBalancingScheme() != null + && f.getLoadBalancingScheme().equals("INTERNAL_MANAGED")) + .forEach( + forwardingRule -> { + GoogleTargetProxyType type = + forwardingRule.getTarget() != null + ? Utils.getTargetProxyType(forwardingRule.getTarget()) + : null; + if (type == HTTP || type == HTTPS) { + cacheRemainderOfLoadBalancerResourceGraph(forwardingRule); + } else { + throw new IllegalArgumentException( + "Not responsible for on demand caching of load balancers without target " + + "proxy or with SSL proxy type."); + } + }); + } + + @Override + public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) { + log.error(e.getMessage()); + } + } + } + + abstract static class BaseCallback extends JsonBatchCallback { + List failedSubjects; + String subject; + + public BaseCallback(List failedSubjects, String subject) { + this.failedSubjects = failedSubjects; + this.subject = subject; + } + + @Override + public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { + log.warn( + "Failed to read a component of subject " + + subject + + ". The platform error message was:\n" + + e.getMessage() + + ". \nReporting it as 'Failed' to the caching agent. "); + failedSubjects.add(subject); + } + } + + public class TargetHttpsProxyCallback extends BaseCallback { + @Override + public void onSuccess(TargetHttpsProxy targetHttpsProxy, HttpHeaders responseHeaders) + throws IOException { + // SslCertificates is a required field for TargetHttpsProxy, and contains exactly one cert. 
+ googleLoadBalancer.setCertificate( + Utils.getLocalName((targetHttpsProxy.getSslCertificates().get(0)))); + + String urlMapURL = targetHttpsProxy.getUrlMap(); + if (urlMapURL != null) { + UrlMapCallback urlMapCallback = + new UrlMapCallback( + googleLoadBalancer, + projectBackendServices, + projectHealthChecks, + groupHealthRequest, + subject, + failedSubjects); + urlMapRequest.queue( + getCompute() + .regionUrlMaps() + .get(getProject(), getRegion(), Utils.getLocalName(urlMapURL)), + urlMapCallback); + } + } + + private GoogleInternalHttpLoadBalancer googleLoadBalancer; + private GoogleBatchRequest urlMapRequest; + private GoogleBatchRequest groupHealthRequest; + private List projectBackendServices; + private List projectHealthChecks; + + public TargetHttpsProxyCallback( + GoogleInternalHttpLoadBalancer googleLoadBalancer, + GoogleBatchRequest urlMapRequest, + GoogleBatchRequest groupHealthRequest, + List projectBackendServices, + List projectHealthChecks, + String subject, + List failedSubjects) { + super(failedSubjects, subject); + this.googleLoadBalancer = googleLoadBalancer; + this.urlMapRequest = urlMapRequest; + this.groupHealthRequest = groupHealthRequest; + this.projectBackendServices = projectBackendServices; + this.projectHealthChecks = projectHealthChecks; + } + } + + public class TargetProxyCallback extends BaseCallback { + @Override + public void onSuccess(TargetHttpProxy targetHttpProxy, HttpHeaders responseHeaders) + throws IOException { + String urlMapURL = targetHttpProxy.getUrlMap(); + if (urlMapURL != null) { + UrlMapCallback urlMapCallback = + new UrlMapCallback( + googleLoadBalancer, + projectBackendServices, + projectHealthChecks, + groupHealthRequest, + subject, + failedSubjects); + urlMapRequest.queue( + getCompute() + .regionUrlMaps() + .get(getProject(), getRegion(), Utils.getLocalName(urlMapURL)), + urlMapCallback); + } + } + + private GoogleInternalHttpLoadBalancer googleLoadBalancer; + private GoogleBatchRequest urlMapRequest; + private GoogleBatchRequest groupHealthRequest; + private List projectBackendServices; + private List projectHealthChecks; + + public TargetProxyCallback( + GoogleInternalHttpLoadBalancer googleLoadBalancer, + GoogleBatchRequest urlMapRequest, + GoogleBatchRequest groupHealthRequest, + List projectBackendServices, + List projectHealthChecks, + String subject, + List failedSubjects) { + super(failedSubjects, subject); + this.googleLoadBalancer = googleLoadBalancer; + this.urlMapRequest = urlMapRequest; + this.groupHealthRequest = groupHealthRequest; + this.projectBackendServices = projectBackendServices; + this.projectHealthChecks = projectHealthChecks; + } + } + + public class UrlMapCallback extends BaseCallback { + @Override + public void onSuccess(UrlMap urlMap, HttpHeaders responseHeaders) { + // Check that we aren't stomping on our URL map. If we are, log an error. + if (googleLoadBalancer.getDefaultService() != null + || (googleLoadBalancer.getHostRules() != null + && googleLoadBalancer.getHostRules().size() > 0)) { + log.error( + "Overwriting UrlMap " + + urlMap.getName() + + ". You may have a TargetHttp(s)Proxy naming collision."); + } + + googleLoadBalancer.setUrlMapName(urlMap.getName()); + // Queue up the backend services to process. + Set queuedServices = new HashSet<>(); + + // Default service is mandatory. 
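The UrlMapCallback logic that follows collects every backend-service name the URL map references: the default service, each path matcher's default service, and each path rule's service. Informally, the set of queued names behaves like this sketch (an illustration only; null checks are added so the sketch stands alone):

import com.google.api.services.compute.model.PathMatcher;
import com.google.api.services.compute.model.PathRule;
import com.google.api.services.compute.model.UrlMap;
import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils;
import java.util.HashSet;
import java.util.Set;

/** Sketch only: which backend-service names the UrlMapCallback queues for processing. */
class UrlMapBackendServiceNamesSketch {
  static Set<String> referencedServiceNames(UrlMap urlMap) {
    Set<String> names = new HashSet<>();
    names.add(Utils.getLocalName(urlMap.getDefaultService()));
    if (urlMap.getPathMatchers() != null) {
      for (PathMatcher pathMatcher : urlMap.getPathMatchers()) {
        names.add(Utils.getLocalName(pathMatcher.getDefaultService()));
        if (pathMatcher.getPathRules() != null) {
          for (PathRule pathRule : pathMatcher.getPathRules()) {
            if (pathRule.getService() != null) {
              names.add(Utils.getLocalName(pathRule.getService()));
            }
          }
        }
      }
    }
    return names;
  }
}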
+ String urlMapDefaultService = Utils.getLocalName(urlMap.getDefaultService()); + queuedServices.add(urlMapDefaultService); + + GoogleBackendService service1 = new GoogleBackendService(); + service1.setName(urlMapDefaultService); + googleLoadBalancer.setDefaultService(service1); + if (urlMap.getPathMatchers() != null) { + for (PathMatcher pathMatcher : urlMap.getPathMatchers()) { + String pathMatchDefaultService = Utils.getLocalName(pathMatcher.getDefaultService()); + for (HostRule hostRule : urlMap.getHostRules()) { + if (hostRule.getPathMatcher() != null + && hostRule.getPathMatcher().equals(pathMatcher.getName())) { + GoogleBackendService googleBackendService = new GoogleBackendService(); + googleBackendService.setName(pathMatchDefaultService); + + GooglePathMatcher gPathMatcher = new GooglePathMatcher(); + gPathMatcher.setPathRules(new ArrayList<>()); + gPathMatcher.setDefaultService(googleBackendService); + + GoogleHostRule gHostRule = new GoogleHostRule(); + gHostRule.setHostPatterns(hostRule.getHosts()); + gHostRule.setPathMatcher(gPathMatcher); + List collect = + pathMatcher.getPathRules().stream() + .map( + pathRule -> { + GoogleBackendService service = new GoogleBackendService(); + service.setName(Utils.getLocalName(pathRule.getService())); + + GooglePathRule googlePathRule = new GooglePathRule(); + googlePathRule.setPaths(pathRule.getPaths()); + googlePathRule.setBackendService(service); + return googlePathRule; + }) + .collect(toList()); + gPathMatcher.setPathRules(collect); + googleLoadBalancer.getHostRules().add(gHostRule); + } + } + + queuedServices.add(pathMatchDefaultService); + for (PathRule pathRule : pathMatcher.getPathRules()) { + if (pathRule.getService() != null) { + queuedServices.add(Utils.getLocalName(pathRule.getService())); + } + } + } + } + + // Process queued backend services. + for (String queuedService : queuedServices) { + BackendService service = + projectBackendServices.stream() + .filter(bs -> Utils.getLocalName(bs.getName()).equals(queuedService)) + .findFirst() + .get(); + handleBackendService(service, googleLoadBalancer, projectHealthChecks, groupHealthRequest); + } + } + + private GoogleInternalHttpLoadBalancer googleLoadBalancer; + private List projectBackendServices; + private List projectHealthChecks; + private GoogleBatchRequest groupHealthRequest; + + public UrlMapCallback( + GoogleInternalHttpLoadBalancer googleLoadBalancer, + List projectBackendServices, + List projectHealthChecks, + GoogleBatchRequest groupHealthRequest, + String subject, + List failedSubjects) { + super(failedSubjects, subject); + this.googleLoadBalancer = googleLoadBalancer; + this.projectBackendServices = projectBackendServices; + this.projectHealthChecks = projectHealthChecks; + this.groupHealthRequest = groupHealthRequest; + } + } + + public class GroupHealthCallback extends JsonBatchCallback { + /** + * Tolerate of the group health calls failing. Spinnaker reports empty load balancer healths as + * 'unknown'. If healthStatus is null in the onSuccess() function, the same state is reported, + * so this shouldn't cause issues. + */ + public void onFailure(final GoogleJsonError e, HttpHeaders responseHeaders) { + log.debug( + "Failed backend service group health call for backend service " + + getBackendServiceName() + + " for Http load balancer. 
The platform error message was:\n " + + e.getMessage() + + "."); + } + + @Override + public void onSuccess( + BackendServiceGroupHealth backendServiceGroupHealth, HttpHeaders responseHeaders) { + if (!bsNameToGroupHealthsMap.containsKey(backendServiceName)) { + bsNameToGroupHealthsMap.put( + backendServiceName, new ArrayList<>(Arrays.asList(backendServiceGroupHealth))); + } else { + bsNameToGroupHealthsMap.get(backendServiceName).add(backendServiceGroupHealth); + } + } + + public String getBackendServiceName() { + return backendServiceName; + } + + public void setBackendServiceName(String backendServiceName) { + this.backendServiceName = backendServiceName; + } + + private String backendServiceName; + + public GroupHealthCallback(String backendServiceName) { + this.backendServiceName = backendServiceName; + } + } + + private void handleBackendService( + BackendService backendService, + GoogleInternalHttpLoadBalancer googleHttpLoadBalancer, + List healthChecks, + GoogleBatchRequest groupHealthRequest) { + if (backendService == null) { + return; + } + + final GroupHealthCallback groupHealthCallback = + new GroupHealthCallback(backendService.getName()); + + // We have to update the backend service objects we created from the UrlMapCallback. + // The UrlMapCallback knows which backend service is the defaultService, etc and the + // BackendServiceCallback has the actual serving capacity and server group data. + List backendServicesInMap = + Utils.getBackendServicesFromInternalHttpLoadBalancerView(googleHttpLoadBalancer.getView()); + List backendServicesToUpdate = + backendServicesInMap.stream() + .filter(b -> b.getName().equals(backendService.getName())) + .collect(toList()); + for (GoogleBackendService service : backendServicesToUpdate) { + service.setRegion(googleHttpLoadBalancer.getRegion()); + service.setSessionAffinity( + GoogleSessionAffinity.valueOf(backendService.getSessionAffinity())); + service.setAffinityCookieTtlSec(backendService.getAffinityCookieTtlSec()); + service.setEnableCDN(backendService.getEnableCDN()); + String name = backendService.getPortName(); + service.setPortName( + name != null ? name : GoogleHttpLoadBalancingPolicy.HTTP_DEFAULT_PORT_NAME); + ConnectionDraining draining = backendService.getConnectionDraining(); + service.setConnectionDrainingTimeoutSec( + draining == null ? 0 : draining.getDrainingTimeoutSec()); + // Note: It's possible for a backend service to have backends that point to a null group. + if (backendService.getBackends() != null) { + List backends = + backendService.getBackends().stream() + .filter(backend -> backend.getGroup() != null) + .map( + backend -> { + GoogleLoadBalancedBackend googleBackend = new GoogleLoadBalancedBackend(); + googleBackend.setPolicy(GCEUtil.loadBalancingPolicyFromBackend(backend)); + googleBackend.setServerGroupUrl(backend.getGroup()); + return googleBackend; + }) + .collect(toList()); + service.setBackends(backends); + } + } + + // Note: It's possible for a backend service to have backends that point to a null group. + if (backendService.getBackends() != null) { + backendService.getBackends().stream() + .filter(backend -> backend.getGroup() != null) + .forEach( + backend -> { + ResourceGroupReference resourceGroup = new ResourceGroupReference(); + resourceGroup.setGroup(backend.getGroup()); + + // Make only the group health request calls we need to. 
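A note on the group-health batching that follows: each (backend service, instance group) pair should trigger at most one regionBackendServices().getHealth() call per caching cycle, and the HashSet-based dedup only achieves that if GroupHealthRequest compares by value over its (project, backend service name, group) fields; with identity equality every backend would be re-queued. The LoadBalancerHealthResolution entries recorded alongside the requests are presumably what associate the collected group healths back to this load balancer once the batch completes.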
+ GroupHealthRequest ghr = + new GroupHealthRequest( + getProject(), backendService.getName(), resourceGroup.getGroup()); + if (!queuedBsGroupHealthRequests.contains(ghr)) { + // The groupHealthCallback updates the local cache. + log.debug("Queueing a batch call for getHealth(): {}", ghr); + queuedBsGroupHealthRequests.add(ghr); + try { + groupHealthRequest.queue( + getCompute() + .regionBackendServices() + .getHealth( + getProject(), getRegion(), backendService.getName(), resourceGroup), + groupHealthCallback); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } else { + log.debug("Passing, batch call result cached for getHealth(): {}", ghr); + } + resolutions.add( + new LoadBalancerHealthResolution( + googleHttpLoadBalancer, backendService.getName())); + }); + } + for (String healthCheckURL : backendService.getHealthChecks()) { + String healthCheckName = Utils.getLocalName(healthCheckURL); + HealthCheck healthCheck = + healthChecks.stream() + .filter(hc -> Utils.getLocalName(hc.getName()).equals(healthCheckName)) + .findFirst() + .get(); + handleHealthCheck(healthCheck, backendServicesToUpdate); + } + } + + private static void handleHealthCheck( + final HealthCheck healthCheck, List googleBackendServices) { + if (healthCheck == null) return; + + Integer port = null; + GoogleHealthCheck.HealthCheckType hcType = null; + String requestPath = null; + if (healthCheck.getTcpHealthCheck() != null) { + port = healthCheck.getTcpHealthCheck().getPort(); + hcType = GoogleHealthCheck.HealthCheckType.TCP; + } else if (healthCheck.getSslHealthCheck() != null) { + port = healthCheck.getSslHealthCheck().getPort(); + hcType = GoogleHealthCheck.HealthCheckType.SSL; + } else if (healthCheck.getHttpHealthCheck() != null) { + port = healthCheck.getHttpHealthCheck().getPort(); + requestPath = healthCheck.getHttpHealthCheck().getRequestPath(); + hcType = GoogleHealthCheck.HealthCheckType.HTTP; + } else if (healthCheck.getHttpsHealthCheck() != null) { + port = healthCheck.getHttpsHealthCheck().getPort(); + requestPath = healthCheck.getHttpsHealthCheck().getRequestPath(); + hcType = GoogleHealthCheck.HealthCheckType.HTTPS; + } else if (healthCheck.getUdpHealthCheck() != null) { + port = healthCheck.getUdpHealthCheck().getPort(); + hcType = GoogleHealthCheck.HealthCheckType.UDP; + } + + if (port != null && hcType != null) { + for (GoogleBackendService googleBackendService : googleBackendServices) { + GoogleHealthCheck googleHealthCheck = new GoogleHealthCheck(); + googleHealthCheck.setName(healthCheck.getName()); + googleHealthCheck.setRequestPath(requestPath); + googleHealthCheck.setSelfLink(healthCheck.getSelfLink()); + googleHealthCheck.setPort(port); + googleHealthCheck.setHealthCheckType(hcType); + googleHealthCheck.setCheckIntervalSec(healthCheck.getCheckIntervalSec()); + googleHealthCheck.setTimeoutSec(healthCheck.getTimeoutSec()); + googleHealthCheck.setUnhealthyThreshold(healthCheck.getUnhealthyThreshold()); + googleHealthCheck.setHealthyThreshold(healthCheck.getHealthyThreshold()); + googleHealthCheck.setRegion(healthCheck.getRegion()); + googleBackendService.setHealthCheck(googleHealthCheck); + } + } + } +} diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/config/GoogleInfrastructureProviderConfig.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/config/GoogleInfrastructureProviderConfig.groovy index 08a2aef1fed..2902f89fcd1 100644 --- 
a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/config/GoogleInfrastructureProviderConfig.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/config/GoogleInfrastructureProviderConfig.groovy @@ -153,6 +153,11 @@ class GoogleInfrastructureProviderConfig { objectMapper, registry, region) + newlyAddedAgents << new GoogleInternalHttpLoadBalancerCachingAgent(clouddriverUserAgentApplicationName, + credentials, + objectMapper, + registry, + region) newlyAddedAgents << new GoogleNetworkLoadBalancerCachingAgent(clouddriverUserAgentApplicationName, credentials, objectMapper, diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleClusterProvider.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleClusterProvider.groovy index b5dab63ceef..c7f5f640ce6 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleClusterProvider.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleClusterProvider.groovy @@ -28,6 +28,7 @@ import com.netflix.spinnaker.clouddriver.google.model.* import com.netflix.spinnaker.clouddriver.google.model.callbacks.Utils import com.netflix.spinnaker.clouddriver.google.model.health.GoogleLoadBalancerHealth import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleHttpLoadBalancer +import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalHttpLoadBalancer import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleInternalLoadBalancer import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancer import com.netflix.spinnaker.clouddriver.google.model.loadbalancing.GoogleLoadBalancerType @@ -220,7 +221,7 @@ class GoogleClusterProvider implements ClusterProvider { serverGroupData.each { CacheData serverGroupCacheData -> GoogleServerGroup serverGroup = serverGroupFromCacheData(serverGroupCacheData, clusterView.accountName, instances, securityGroups, loadBalancers) clusterView.serverGroups << serverGroup.view - clusterView.loadBalancers.addAll(serverGroup.loadBalancers*.view) + clusterView.loadBalancers.addAll(serverGroup.loadBalancers) } log.debug("Server groups added to cluster: ${clusterView?.serverGroups?.collect { it?.name }}") } @@ -238,6 +239,9 @@ class GoogleClusterProvider implements ClusterProvider { case GoogleLoadBalancerType.HTTP: loadBalancer = objectMapper.convertValue(it.attributes, GoogleHttpLoadBalancer) break + case GoogleLoadBalancerType.INTERNAL_MANAGED: + loadBalancer = objectMapper.convertValue(it.attributes, GoogleInternalHttpLoadBalancer) + break case GoogleLoadBalancerType.NETWORK: loadBalancer = objectMapper.convertValue(it.attributes, GoogleNetworkLoadBalancer) break @@ -301,6 +305,11 @@ class GoogleClusterProvider implements ClusterProvider { Utils.determineHttpLoadBalancerDisabledState(loadBalancer, serverGroup) } + def internalHttpLoadBalancers = loadBalancers.findAll { it.type == GoogleLoadBalancerType.INTERNAL_MANAGED } + def internalHttpDisabledStates = internalHttpLoadBalancers.collect { loadBalancer -> + Utils.determineInternalHttpLoadBalancerDisabledState(loadBalancer, serverGroup) + } + def sslLoadBalancers = loadBalancers.findAll { it.type == GoogleLoadBalancerType.SSL } def sslDisabledStates = sslLoadBalancers.collect { loadBalancer -> 
Utils.determineSslLoadBalancerDisabledState(loadBalancer, serverGroup) @@ -331,6 +340,9 @@ class GoogleClusterProvider implements ClusterProvider { if (internalDisabledStates) { isDisabled &= internalDisabledStates.every { it } } + if (internalHttpDisabledStates) { + isDisabled &= internalHttpDisabledStates.every { it } + } if (sslDisabledStates) { isDisabled &= sslDisabledStates.every { it } } diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleLoadBalancerProvider.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleLoadBalancerProvider.groovy index b49db1e74bf..79dff8dc61c 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleLoadBalancerProvider.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleLoadBalancerProvider.groovy @@ -89,6 +89,9 @@ class GoogleLoadBalancerProvider implements LoadBalancerProvider - backendServices << hostRule?.pathMatcher?.defaultService?.name - hostRule?.pathMatcher?.pathRules?.each { GooglePathRule pathRule -> - backendServices << pathRule.backendService.name - } - } + backendServices = Utils.getBackendServicesFromHttpLoadBalancerView(httpView).collect { it.name } + urlMapName = httpView.urlMapName + break + case (GoogleLoadBalancerType.INTERNAL_MANAGED): + GoogleInternalHttpLoadBalancer.InternalHttpLbView httpView = view as GoogleInternalHttpLoadBalancer.InternalHttpLbView + backendServices = Utils.getBackendServicesFromInternalHttpLoadBalancerView(httpView).collect { it.name } urlMapName = httpView.urlMapName break case (GoogleLoadBalancerType.INTERNAL): @@ -261,14 +264,6 @@ class GoogleLoadBalancerProvider implements LoadBalancerProvider backendServices = Utils.getBackendServicesFromHttpLoadBalancerView(httpView) - backendServices?.each { GoogleBackendService backendService -> - backendServiceHealthChecks[backendService.name] = backendService.healthCheck.view - } - } - String instancePort String loadBalancerPort String sessionAffinity @@ -282,6 +277,16 @@ class GoogleLoadBalancerProvider implements LoadBalancerProvider backendServices = Utils.getBackendServicesFromHttpLoadBalancerView(httpView) + backendServiceHealthChecks = backendServices.collectEntries { [it.name, it.healthCheck.view] } + break + case GoogleLoadBalancerType.INTERNAL_MANAGED: + instancePort = 'http' + loadBalancerPort = Utils.derivePortOrPortRange(view.portRange) + GoogleInternalHttpLoadBalancer.InternalHttpLbView httpView = view as GoogleInternalHttpLoadBalancer.InternalHttpLbView + List backendServices = Utils.getBackendServicesFromInternalHttpLoadBalancerView(httpView) + backendServiceHealthChecks = backendServices.collectEntries { [it.name, it.healthCheck.view] } break case GoogleLoadBalancerType.INTERNAL: GoogleInternalLoadBalancer.View ilbView = view as GoogleInternalLoadBalancer.View diff --git a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSubnetProvider.groovy b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSubnetProvider.groovy index 0dfa1c5d9f6..42f77b17982 100644 --- a/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSubnetProvider.groovy +++ b/clouddriver-google/src/main/groovy/com/netflix/spinnaker/clouddriver/google/provider/view/GoogleSubnetProvider.groovy @@ -77,7 +77,8 @@ class GoogleSubnetProvider 
implements SubnetProvider { cidrBlock: subnet.ipCidrRange, account: parts.account, region: parts.region, - selfLink: subnet.selfLink + selfLink: subnet.selfLink, + purpose: subnet.purpose ?: "n/a" ) } diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DisableGoogleServerGroupAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DisableGoogleServerGroupAtomicOperationUnitSpec.groovy index e62679bb84b..b0b4758c7bc 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DisableGoogleServerGroupAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/DisableGoogleServerGroupAtomicOperationUnitSpec.groovy @@ -162,9 +162,9 @@ class DisableGoogleServerGroupAtomicOperationUnitSpec extends Specification { 3 * globalForwardingRules.list(PROJECT_NAME) >> globalForwardingRulesList 3 * globalForwardingRulesList.execute() >> new ForwardingRuleList(items: []) - 1 * computeMock.forwardingRules() >> forwardingRules - 1 * forwardingRules.list(PROJECT_NAME, _) >> forwardingRulesList - 1 * forwardingRulesList.execute() >> new ForwardingRuleList(items: []) + 2 * computeMock.forwardingRules() >> forwardingRules + 2 * forwardingRules.list(PROJECT_NAME, _) >> forwardingRulesList + 2 * forwardingRulesList.execute() >> new ForwardingRuleList(items: []) registry.timer( GoogleApiTestUtils.makeOkId( diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/EnableGoogleServerGroupAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/EnableGoogleServerGroupAtomicOperationUnitSpec.groovy index 091394b56e6..1c79018353f 100644 --- a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/EnableGoogleServerGroupAtomicOperationUnitSpec.groovy +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/EnableGoogleServerGroupAtomicOperationUnitSpec.groovy @@ -166,9 +166,9 @@ class EnableGoogleServerGroupAtomicOperationUnitSpec extends Specification { 1 * instanceTemplatesMock.get(PROJECT_NAME, INSTANCE_TEMPLATE_NAME) >> instanceTemplatesGetMock 1 * instanceTemplatesGetMock.execute() >> instanceTemplate - 2 * computeMock.forwardingRules() >> forwardingRulesMock - 2 * forwardingRulesMock.list(PROJECT_NAME, REGION) >> forwardingRulesListMock - 2 * forwardingRulesListMock.execute() >> forwardingRulesList + 3 * computeMock.forwardingRules() >> forwardingRulesMock + 3 * forwardingRulesMock.list(PROJECT_NAME, REGION) >> forwardingRulesListMock + 3 * forwardingRulesListMock.execute() >> forwardingRulesList [TARGET_POOL_NAME_1, TARGET_POOL_NAME_2].each { targetPoolLocalName -> 1 * computeMock.targetPools() >> targetPoolsMock @@ -224,9 +224,9 @@ class EnableGoogleServerGroupAtomicOperationUnitSpec extends Specification { 1 * instanceTemplatesMock.get(PROJECT_NAME, INSTANCE_TEMPLATE_NAME) >> instanceTemplatesGetMock 1 * instanceTemplatesGetMock.execute() >> instanceTemplate - 2 * computeMock.forwardingRules() >> forwardingRulesMock - 2 * forwardingRulesMock.list(PROJECT_NAME, REGION) >> forwardingRulesListMock - 2 * forwardingRulesListMock.execute() >> forwardingRulesList2 + 3 * computeMock.forwardingRules() >> forwardingRulesMock + 3 * forwardingRulesMock.list(PROJECT_NAME, REGION) >> forwardingRulesListMock + 3 * 
forwardingRulesListMock.execute() >> forwardingRulesList2 3 * computeMock.globalForwardingRules() >> globalForwardingRules 3 * globalForwardingRules.list(PROJECT_NAME) >> globalForwardingRulesList diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy new file mode 100644 index 00000000000..78d138700e7 --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/DeleteGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy @@ -0,0 +1,791 @@ +/* + * Copyright 2015 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer + +import com.google.api.services.compute.Compute +import com.google.api.services.compute.model.* +import com.netflix.spectator.api.DefaultRegistry +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties +import com.netflix.spinnaker.clouddriver.google.deploy.GCEUtil +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller +import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry +import com.netflix.spinnaker.clouddriver.google.deploy.description.DeleteGoogleLoadBalancerDescription +import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationException +import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleOperationTimedOutException +import com.netflix.spinnaker.clouddriver.google.deploy.exception.GoogleResourceNotFoundException +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject + +class DeleteGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec extends Specification { + private static final BASE_PHASE = "test-phase" + private static final ACCOUNT_NAME = "auto" + private static final PROJECT_NAME = "my_project" + private static final HTTP_LOAD_BALANCER_NAME = "default" + private static final URL_MAP_NAME = "url-map" + private static final REGION = "us-central1" + private static final TARGET_HTTP_PROXY_URL = "projects/$PROJECT_NAME/global/targetHttpProxies/target-http-proxy" + private static final TARGET_HTTP_PROXY_NAME = "target-http-proxy" + private static final URL_MAP_URL = "project/url-map" + private static final BACKEND_SERVICE_URL = "project/backend-service" + private static final BACKEND_SERVICE_NAME = "backend-service" + private static final HEALTH_CHECK_URL = "project/health-check" + private static final HEALTH_CHECK_NAME = "health-check" + private static final 
FORWARDING_RULE_DELETE_OP_NAME = "delete-forwarding-rule" + private static final TARGET_HTTP_PROXY_DELETE_OP_NAME = "delete-target-http-proxy" + private static final URL_MAP_DELETE_OP_NAME = "delete-url-map" + private static final BACKEND_SERVICE_DELETE_OP_NAME = "delete-backend-service" + private static final HEALTH_CHECK_DELETE_OP_NAME = "delete-health-check" + private static final PENDING = "PENDING" + private static final DONE = "DONE" + + @Shared + def threadSleeperMock = Mock(GoogleOperationPoller.ThreadSleeper) + @Shared + def registry = new DefaultRegistry() + @Shared + SafeRetry safeRetry + + def setupSpec() { + TaskRepository.threadLocalTask.set(Mock(Task)) + safeRetry = SafeRetry.withoutDelay() + } + + void "should delete Internal Http Load Balancer with one backend service"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: "INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def urlMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: URL_MAP_NAME) + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + def healthChecks = Mock(Compute.RegionHealthChecks) + + def regionForwardingRulesDelete = Mock(Compute.ForwardingRules.Delete) + def regionForwardingRulesDeleteOp = new Operation( + name: FORWARDING_RULE_DELETE_OP_NAME, + status: DONE) + def targetHttpProxiesDelete = Mock(Compute.RegionTargetHttpProxies.Delete) + def targetHttpProxiesDeleteOp = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: DONE) + def urlMapsDelete = Mock(Compute.RegionUrlMaps.Delete) + def urlMapsDeleteOp = new Operation( + name: URL_MAP_DELETE_OP_NAME, + status: DONE) + def backendServicesDelete = Mock(Compute.RegionBackendServices.Delete) + def backendServicesDeleteOp = new Operation( + name: BACKEND_SERVICE_DELETE_OP_NAME, + status: DONE) + def healthChecksDelete = Mock(Compute.RegionHealthChecks.Delete) + def healthChecksDeleteOp = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME, + status: DONE) + + def regionOperations = Mock(Compute.RegionOperations) + def targetHttpProxiesOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapsOperationGet = Mock(Compute.RegionOperations.Get) + def backendServicesOperationGet = Mock(Compute.RegionOperations.Get) + def healthChecksOperationGet = Mock(Compute.RegionOperations.Get) + + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: 
threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 3 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 1 * regionForwardingRules.get(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesGet + 1 * regionForwardingRulesGet.execute() >> forwardingRule + 3 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap]) + 2 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesGet + 1 * backendServicesGet.execute() >> backendService + + 1 * regionForwardingRules.delete(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesDelete + 1 * regionForwardingRulesDelete.execute() >> regionForwardingRulesDeleteOp + 1 * targetHttpProxies.delete(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesDelete + 1 * targetHttpProxiesDelete.execute() >> targetHttpProxiesDeleteOp + 1 * urlMaps.delete(PROJECT_NAME, REGION, URL_MAP_NAME) >> urlMapsDelete + 1 * urlMapsDelete.execute() >> urlMapsDeleteOp + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesDelete + 1 * backendServicesDelete.execute() >> backendServicesDeleteOp + 1 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.delete(PROJECT_NAME, REGION, HEALTH_CHECK_NAME) >> healthChecksDelete + 1 * healthChecksDelete.execute() >> healthChecksDeleteOp + + 4 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_DELETE_OP_NAME) >> targetHttpProxiesOperationGet + 1 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_DELETE_OP_NAME) >> urlMapsOperationGet + 1 * urlMapsOperationGet.execute() >> urlMapsDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_DELETE_OP_NAME) >> backendServicesOperationGet + 1 * backendServicesOperationGet.execute() >> backendServicesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_DELETE_OP_NAME) >> healthChecksOperationGet + 1 * healthChecksOperationGet.execute() >> healthChecksDeleteOp + } + + void "should delete Internal Http Load Balancer with multiple backend services/health checks"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: "INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def 
urlMap = new UrlMap( + name: URL_MAP_NAME, + defaultService: BACKEND_SERVICE_URL, + pathMatchers: [ + new PathMatcher(defaultService: BACKEND_SERVICE_URL + "2", + pathRules: [ + new PathRule(service: BACKEND_SERVICE_URL + "3"), new PathRule(service: BACKEND_SERVICE_URL) + ] + ) + ]) + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendServicesGet2 = Mock(Compute.RegionBackendServices.Get) + def backendServicesGet3 = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + def backendService2 = new BackendService(healthChecks: [HEALTH_CHECK_URL+"2"]) + def backendService3 = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + def healthChecks = Mock(Compute.RegionHealthChecks) + + def regionForwardingRulesDelete = Mock(Compute.ForwardingRules.Delete) + def regionForwardingRulesDeleteOp = new Operation( + name: FORWARDING_RULE_DELETE_OP_NAME, + status: DONE) + def targetHttpProxiesDelete = Mock(Compute.RegionTargetHttpProxies.Delete) + def targetHttpProxiesDeleteOp = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: DONE) + def urlMapsDelete = Mock(Compute.RegionUrlMaps.Delete) + def urlMapsDeleteOp = new Operation( + name: URL_MAP_DELETE_OP_NAME, + status: DONE) + def backendServicesDelete = Mock(Compute.RegionBackendServices.Delete) + def backendServicesDeleteOp = new Operation( + name: BACKEND_SERVICE_DELETE_OP_NAME, + status: DONE) + def backendServicesDelete2 = Mock(Compute.RegionBackendServices.Delete) + def backendServicesDeleteOp2 = new Operation( + name: BACKEND_SERVICE_DELETE_OP_NAME+"2", + status: DONE) + def backendServicesDelete3 = Mock(Compute.RegionBackendServices.Delete) + def backendServicesDeleteOp3 = new Operation( + name: BACKEND_SERVICE_DELETE_OP_NAME+"3", + status: DONE) + def healthChecksDelete = Mock(Compute.RegionHealthChecks.Delete) + def healthChecksDeleteOp = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME, + status: DONE) + def healthChecksDelete2 = Mock(Compute.RegionHealthChecks.Delete) + def healthChecksDeleteOp2 = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME+"2", + status: DONE) + + def regionOperations = Mock(Compute.RegionOperations) + def targetHttpProxiesOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapsOperationGet = Mock(Compute.RegionOperations.Get) + def backendServicesOperationGet = Mock(Compute.RegionOperations.Get) + def backendServicesOperationGet2 = Mock(Compute.RegionOperations.Get) + def backendServicesOperationGet3 = Mock(Compute.RegionOperations.Get) + def healthChecksOperationGet = Mock(Compute.RegionOperations.Get) + def healthChecksOperationGet2 = Mock(Compute.RegionOperations.Get) + + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 3 * computeMock.forwardingRules() >> regionForwardingRules + 
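Reading the stubbed interaction counts in these then: blocks: computeMock.forwardingRules() is expected three times because the operation lists forwarding rules to find the load balancer, gets the matching rule, and then deletes it; the regionOperations() counts likewise reflect one status poll per delete operation that the test returns as DONE.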
1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 1 * regionForwardingRules.get(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesGet + 1 * regionForwardingRulesGet.execute() >> forwardingRule + 3 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap]) + + 6 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesGet + 1 * backendServicesGet.execute() >> backendService + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME+"2") >> backendServicesGet2 + 1 * backendServicesGet2.execute() >> backendService2 + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME+"3") >> backendServicesGet3 + 1 * backendServicesGet3.execute() >> backendService3 + + 1 * regionForwardingRules.delete(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesDelete + 1 * regionForwardingRulesDelete.execute() >> regionForwardingRulesDeleteOp + 1 * targetHttpProxies.delete(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesDelete + 1 * targetHttpProxiesDelete.execute() >> targetHttpProxiesDeleteOp + 1 * urlMaps.delete(PROJECT_NAME, REGION, URL_MAP_NAME) >> urlMapsDelete + 1 * urlMapsDelete.execute() >> urlMapsDeleteOp + + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesDelete + 1 * backendServicesDelete.execute() >> backendServicesDeleteOp + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME+"2") >> backendServicesDelete2 + 1 * backendServicesDelete2.execute() >> backendServicesDeleteOp2 + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME+"3") >> backendServicesDelete3 + 1 * backendServicesDelete3.execute() >> backendServicesDeleteOp3 + 2 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.delete(PROJECT_NAME, REGION, HEALTH_CHECK_NAME) >> healthChecksDelete + 1 * healthChecksDelete.execute() >> healthChecksDeleteOp + 1 * healthChecks.delete(PROJECT_NAME, REGION, HEALTH_CHECK_NAME+"2") >> healthChecksDelete2 + 1 * healthChecksDelete2.execute() >> healthChecksDeleteOp2 + + 7 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_DELETE_OP_NAME) >> targetHttpProxiesOperationGet + 1 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_DELETE_OP_NAME) >> urlMapsOperationGet + 1 * urlMapsOperationGet.execute() >> urlMapsDeleteOp + + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_DELETE_OP_NAME) >> backendServicesOperationGet + 1 * backendServicesOperationGet.execute() >> backendServicesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_DELETE_OP_NAME+"2") >> backendServicesOperationGet2 + 1 * backendServicesOperationGet2.execute() >> backendServicesDeleteOp2 + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_DELETE_OP_NAME+"3") >> backendServicesOperationGet3 + 1 * backendServicesOperationGet3.execute() >> backendServicesDeleteOp3 + 1 * 
regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_DELETE_OP_NAME) >> healthChecksOperationGet + 1 * healthChecksOperationGet.execute() >> healthChecksDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_DELETE_OP_NAME+"2") >> healthChecksOperationGet2 + 1 * healthChecksOperationGet2.execute() >> healthChecksDeleteOp2 + } + + void "should fail to delete an Internal Http Load Balancer that does not exist"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.registry = registry + + when: + operation.operate([]) + + then: + 1 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: []] + thrown GoogleResourceNotFoundException + } + + void "should fail to delete Internal Http Load Balancer if failed to delete a resource"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: "INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def urlMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: URL_MAP_NAME) + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + def healthChecks = Mock(Compute.RegionHealthChecks) + + def regionForwardingRulesDelete = Mock(Compute.ForwardingRules.Delete) + def regionForwardingRulesDeleteOp = new Operation( + name: FORWARDING_RULE_DELETE_OP_NAME, + status: DONE) + def targetHttpProxiesDelete = Mock(Compute.RegionTargetHttpProxies.Delete) + def targetHttpProxiesDeleteOp = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: DONE) + def urlMapsDelete = Mock(Compute.RegionUrlMaps.Delete) + def urlMapsDeleteOp = new Operation( + name: URL_MAP_DELETE_OP_NAME, + status: DONE) + def backendServicesDelete = Mock(Compute.RegionBackendServices.Delete) + def backendServicesDeleteOp = new Operation( + name: BACKEND_SERVICE_DELETE_OP_NAME, + status: DONE) + def healthChecksDelete = Mock(Compute.RegionHealthChecks.Delete) + def healthChecksPendingDeleteOp = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME, + status: PENDING) + def healthChecksFailingDeleteOp = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME, + status: DONE, + error: new Operation.Error(errors: [new Operation.Error.Errors(message: 
"error")])) + + def regionOperations = Mock(Compute.RegionOperations) + def targetHttpProxiesOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapsOperationGet = Mock(Compute.RegionOperations.Get) + def backendServicesOperationGet = Mock(Compute.RegionOperations.Get) + def healthChecksOperationGet = Mock(Compute.RegionOperations.Get) + + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 3 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 1 * regionForwardingRules.get(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesGet + 1 * regionForwardingRulesGet.execute() >> forwardingRule + 3 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap]) + 2 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesGet + 1 * backendServicesGet.execute() >> backendService + + 1 * regionForwardingRules.delete(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesDelete + 1 * regionForwardingRulesDelete.execute() >> regionForwardingRulesDeleteOp + 1 * targetHttpProxies.delete(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesDelete + 1 * targetHttpProxiesDelete.execute() >> targetHttpProxiesDeleteOp + 1 * urlMaps.delete(PROJECT_NAME, REGION, URL_MAP_NAME) >> urlMapsDelete + 1 * urlMapsDelete.execute() >> urlMapsDeleteOp + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesDelete + 1 * backendServicesDelete.execute() >> backendServicesDeleteOp + 1 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.delete(PROJECT_NAME, REGION, HEALTH_CHECK_NAME) >> healthChecksDelete + 1 * healthChecksDelete.execute() >> healthChecksPendingDeleteOp + + 4 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_DELETE_OP_NAME) >> targetHttpProxiesOperationGet + 1 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_DELETE_OP_NAME) >> urlMapsOperationGet + 1 * urlMapsOperationGet.execute() >> urlMapsDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_DELETE_OP_NAME) >> backendServicesOperationGet + 1 * backendServicesOperationGet.execute() >> backendServicesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, 
HEALTH_CHECK_DELETE_OP_NAME) >> healthChecksOperationGet + 1 * healthChecksOperationGet.execute() >> healthChecksFailingDeleteOp + thrown GoogleOperationException + } + + void "should fail to delete Internal Http Load Balancer if timed out while deleting a resource"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def regionForwardingRulesDelete = Mock(Compute.ForwardingRules.Delete) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: "INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxiesDelete = Mock(Compute.RegionTargetHttpProxies.Delete) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def urlMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: URL_MAP_NAME) + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + + def regionOperations = Mock(Compute.RegionOperations) + def targetHttpProxiesOperationGet = Mock(Compute.RegionOperations.Get) + def targetHttpProxiesDeleteOp = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: PENDING) + + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + deleteOperationTimeoutSeconds: 0, + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + GCEUtil.deleteGlobalListener(computeMock, PROJECT_NAME, HTTP_LOAD_BALANCER_NAME, BASE_PHASE, safeRetry, operation) >> targetHttpProxiesDeleteOp + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 3 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 1 * regionForwardingRules.get(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesGet + 1 * regionForwardingRulesGet.execute() >> forwardingRule + 3 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 1 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap]) + 1 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesGet + 1 * backendServicesGet.execute() >> backendService + + 1 * 
regionForwardingRules.delete(PROJECT_NAME, REGION, _) >> regionForwardingRulesDelete + 1 * regionForwardingRulesDelete.execute() + 1 * targetHttpProxies.delete(PROJECT_NAME, REGION, _) >> targetHttpProxiesDelete + 1 * targetHttpProxiesDelete.execute() >> targetHttpProxiesDeleteOp + + 1 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_DELETE_OP_NAME) >> targetHttpProxiesOperationGet + 1 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOp + thrown GoogleOperationTimedOutException + } + + void "should wait on slow deletion of target HTTP proxy and successfully delete simple HTTP Load Balancer"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: "INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def urlMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: URL_MAP_NAME) + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + def healthChecks = Mock(Compute.RegionHealthChecks) + + def regionForwardingRulesDelete = Mock(Compute.ForwardingRules.Delete) + def regionForwardingRulesDeleteOp = new Operation( + name: FORWARDING_RULE_DELETE_OP_NAME, + status: DONE) + def targetHttpProxiesDelete = Mock(Compute.RegionTargetHttpProxies.Delete) + def targetHttpProxiesDeleteOpPending = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: PENDING) + def targetHttpProxiesDeleteOpDone = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: DONE) + def urlMapsDelete = Mock(Compute.RegionUrlMaps.Delete) + def urlMapsDeleteOp = new Operation( + name: URL_MAP_DELETE_OP_NAME, + status: DONE) + def backendServicesDelete = Mock(Compute.RegionBackendServices.Delete) + def backendServicesDeleteOp = new Operation( + name: BACKEND_SERVICE_DELETE_OP_NAME, + status: DONE) + def healthChecksDelete = Mock(Compute.RegionHealthChecks.Delete) + def healthChecksDeleteOp = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME, + status: DONE) + + def regionOperations = Mock(Compute.RegionOperations) + def targetHttpProxiesOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapsOperationGet = Mock(Compute.RegionOperations.Get) + def backendServicesOperationGet = Mock(Compute.RegionOperations.Get) + def healthChecksOperationGet = Mock(Compute.RegionOperations.Get) + + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + 
threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 3 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 1 * regionForwardingRules.get(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesGet + 1 * regionForwardingRulesGet.execute() >> forwardingRule + 3 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap]) + 2 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesGet + 1 * backendServicesGet.execute() >> backendService + + 1 * regionForwardingRules.delete(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesDelete + 1 * regionForwardingRulesDelete.execute() >> regionForwardingRulesDeleteOp + 1 * targetHttpProxies.delete(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesDelete + 1 * targetHttpProxiesDelete.execute() >> targetHttpProxiesDeleteOpPending + 1 * urlMaps.delete(PROJECT_NAME, REGION, URL_MAP_NAME) >> urlMapsDelete + 1 * urlMapsDelete.execute() >> urlMapsDeleteOp + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesDelete + 1 * backendServicesDelete.execute() >> backendServicesDeleteOp + 1 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.delete(PROJECT_NAME, REGION, HEALTH_CHECK_NAME) >> healthChecksDelete + 1 * healthChecksDelete.execute() >> healthChecksDeleteOp + + 6 * computeMock.regionOperations() >> regionOperations + 3 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_DELETE_OP_NAME) >> targetHttpProxiesOperationGet + 2 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOpPending + 1 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOpDone + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_DELETE_OP_NAME) >> urlMapsOperationGet + 1 * urlMapsOperationGet.execute() >> urlMapsDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_DELETE_OP_NAME) >> backendServicesOperationGet + 1 * backendServicesOperationGet.execute() >> backendServicesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_DELETE_OP_NAME) >> healthChecksOperationGet + 1 * healthChecksOperationGet.execute() >> healthChecksDeleteOp + } + + void "should not delete backend service in more than one url map"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: "INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = 
Mock(Compute.RegionUrlMaps) + def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def urlMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: URL_MAP_NAME) + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL]) + def healthChecks = Mock(Compute.RegionHealthChecks) + + def regionForwardingRulesDelete = Mock(Compute.ForwardingRules.Delete) + def regionForwardingRulesDeleteOp = new Operation( + name: FORWARDING_RULE_DELETE_OP_NAME, + status: DONE) + def targetHttpProxiesDelete = Mock(Compute.RegionTargetHttpProxies.Delete) + def targetHttpProxiesDeleteOp = new Operation( + name: TARGET_HTTP_PROXY_DELETE_OP_NAME, + status: DONE) + def urlMapsDelete = Mock(Compute.RegionUrlMaps.Delete) + def urlMapsDeleteOp = new Operation( + name: URL_MAP_DELETE_OP_NAME, + status: DONE) + def backendServicesDelete = Mock(Compute.RegionBackendServices.Delete) + def healthChecksDelete = Mock(Compute.RegionHealthChecks.Delete) + def healthChecksDeleteOp = new Operation( + name: HEALTH_CHECK_DELETE_OP_NAME, + status: DONE) + + def regionOperations = Mock(Compute.RegionOperations) + def targetHttpProxiesOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapsOperationGet = Mock(Compute.RegionOperations.Get) + def healthChecksOperationGet = Mock(Compute.RegionOperations.Get) + + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + def conflictingMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: "conflicting") + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 3 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 1 * regionForwardingRules.get(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesGet + 1 * regionForwardingRulesGet.execute() >> forwardingRule + 3 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap, conflictingMap]) + 2 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesGet + 1 * backendServicesGet.execute() >> backendService + + 1 * regionForwardingRules.delete(PROJECT_NAME, REGION, HTTP_LOAD_BALANCER_NAME) >> regionForwardingRulesDelete + 1 * regionForwardingRulesDelete.execute() >> regionForwardingRulesDeleteOp + 1 * targetHttpProxies.delete(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesDelete + 1 * 
targetHttpProxiesDelete.execute() >> targetHttpProxiesDeleteOp + 1 * urlMaps.delete(PROJECT_NAME, REGION, URL_MAP_NAME) >> urlMapsDelete + 1 * urlMapsDelete.execute() >> urlMapsDeleteOp + 1 * backendServices.delete(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> backendServicesDelete + 1 * backendServicesDelete.execute() >> null + 1 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.delete(PROJECT_NAME, REGION, HEALTH_CHECK_NAME) >> healthChecksDelete + 1 * healthChecksDelete.execute() >> healthChecksDeleteOp + + 3 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_DELETE_OP_NAME) >> targetHttpProxiesOperationGet + 1 * targetHttpProxiesOperationGet.execute() >> targetHttpProxiesDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_DELETE_OP_NAME) >> urlMapsOperationGet + 1 * urlMapsOperationGet.execute() >> urlMapsDeleteOp + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_DELETE_OP_NAME) >> healthChecksOperationGet + 1 * healthChecksOperationGet.execute() >> healthChecksDeleteOp + } + + void "should fail if server group still associated"() { + setup: + def computeMock = Mock(Compute) + def regionForwardingRules = Mock(Compute.ForwardingRules) + def regionForwardingRulesList = Mock(Compute.ForwardingRules.List) + def forwardingRule = new ForwardingRule(target: TARGET_HTTP_PROXY_URL, name: HTTP_LOAD_BALANCER_NAME, loadBalancingScheme: "INTERNAL_MANAGED") + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesGet = Mock(Compute.RegionTargetHttpProxies.Get) + def targetHttpProxy = new TargetHttpProxy(urlMap: URL_MAP_URL) + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsList = Mock(Compute.RegionUrlMaps.List) + def urlMap = new UrlMap(defaultService: BACKEND_SERVICE_URL, name: URL_MAP_NAME) + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesGet = Mock(Compute.RegionBackendServices.Get) + def backendService = new BackendService(healthChecks: [HEALTH_CHECK_URL], backends: [new Backend()]) + def credentials = new GoogleNamedAccountCredentials.Builder().project(PROJECT_NAME).compute(computeMock).build() + def description = new DeleteGoogleLoadBalancerDescription( + loadBalancerName: HTTP_LOAD_BALANCER_NAME, + region: REGION, + accountName: ACCOUNT_NAME, + credentials: credentials) + @Subject def operation = new DeleteGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 1 * computeMock.forwardingRules() >> regionForwardingRules + 1 * regionForwardingRules.list(PROJECT_NAME, REGION) >> regionForwardingRulesList + 1 * regionForwardingRulesList.execute() >> [items: [forwardingRule]] + 2 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 2 * targetHttpProxies.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_NAME) >> targetHttpProxiesGet + 2 * targetHttpProxiesGet.execute() >> targetHttpProxy + 1 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.list(PROJECT_NAME, REGION) >> urlMapsList + 1 * urlMapsList.execute() >> new UrlMapList(items: [urlMap]) + 1 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.get(PROJECT_NAME, REGION, BACKEND_SERVICE_NAME) >> 
backendServicesGet + 1 * backendServicesGet.execute() >> backendService + thrown IllegalStateException + } +} diff --git a/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy new file mode 100644 index 00000000000..9d21f8ce2eb --- /dev/null +++ b/clouddriver-google/src/test/groovy/com/netflix/spinnaker/clouddriver/google/deploy/ops/loadbalancer/UpsertGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec.groovy @@ -0,0 +1,1104 @@ +/* + * Copyright 2014 Google, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer + +import com.fasterxml.jackson.databind.ObjectMapper +import com.google.api.services.compute.Compute +import com.google.api.services.compute.model.* +import com.netflix.spectator.api.DefaultRegistry +import com.netflix.spinnaker.clouddriver.data.task.Task +import com.netflix.spinnaker.clouddriver.data.task.TaskRepository +import com.netflix.spinnaker.clouddriver.google.config.GoogleConfigurationProperties +import com.netflix.spinnaker.clouddriver.google.deploy.GoogleOperationPoller +import com.netflix.spinnaker.clouddriver.google.deploy.SafeRetry +import com.netflix.spinnaker.clouddriver.google.deploy.converters.UpsertGoogleLoadBalancerAtomicOperationConverter +import com.netflix.spinnaker.clouddriver.google.model.GoogleHealthCheck +import com.netflix.spinnaker.clouddriver.google.model.GoogleNetwork +import com.netflix.spinnaker.clouddriver.google.model.GoogleSubnet +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleNetworkProvider +import com.netflix.spinnaker.clouddriver.google.provider.view.GoogleSubnetProvider +import com.netflix.spinnaker.clouddriver.google.security.FakeGoogleCredentials +import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials +import com.netflix.spinnaker.clouddriver.security.DefaultAccountCredentialsProvider +import com.netflix.spinnaker.clouddriver.security.MapBackedAccountCredentialsRepository +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Subject + +import static com.netflix.spinnaker.clouddriver.google.deploy.ops.loadbalancer.UpsertGoogleHttpLoadBalancerTestConstants.* + +class UpsertGoogleInternalHttpLoadBalancerAtomicOperationUnitSpec extends Specification { + private static final PROJECT_NAME = "my-project" + private static final HEALTH_CHECK_OP_NAME = "health-check-op" + private static final BACKEND_SERVICE_OP_NAME = "backend-service-op" + private static final URL_MAP_OP_NAME = "url-map-op" + private static final TARGET_HTTP_PROXY_OP_NAME = "target-http-proxy-op" + private static final DONE = "DONE" + private static final REGION = "us-central1" + + @Shared GoogleHealthCheck hc + @Shared def 
threadSleeperMock = Mock(GoogleOperationPoller.ThreadSleeper) + @Shared def registry = new DefaultRegistry() + @Shared SafeRetry safeRetry + + def setupSpec() { + TaskRepository.threadLocalTask.set(Mock(Task)) + hc = [ + "name" : "basic-check", + "requestPath" : "/", + "healthCheckType" : "HTTP", + "port" : 80, + "checkIntervalSec" : 1, + "timeoutSec" : 1, + "healthyThreshold" : 1, + "unhealthyThreshold": 1 + ] + safeRetry = SafeRetry.withoutDelay() + } + + void "should create an Internal HTTP Load Balancer with host rule, path matcher, path rules, etc with no existing infrastructure"() { + setup: + def computeMock = Mock(Compute) + + def credentialsRepo = new MapBackedAccountCredentialsRepository() + def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() + credentialsRepo.save(ACCOUNT_NAME, credentials) + def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( + accountCredentialsProvider: credentialsProvider, + objectMapper: new ObjectMapper() + ) + + def regionOperations = Mock(Compute.RegionOperations) + def healthCheckOperationGet = Mock(Compute.RegionOperations.Get) + def backendServiceOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapOperationGet = Mock(Compute.RegionOperations.Get) + def targetHttpProxyOperationGet = Mock(Compute.RegionOperations.Get) + def forwardingRuleOperationGet = Mock(Compute.RegionOperations.Get) + + def googleNetworkProviderMock = Mock(GoogleNetworkProvider) + def networkKeyPattern = "gce:networks:some-network:$ACCOUNT_NAME:global" + def googleNetwork = new GoogleNetwork(selfLink: "projects/$PROJECT_NAME/global/networks/some-network") + + def googleSubnetProviderMock = Mock(GoogleSubnetProvider) + def subnetKeyPattern = "gce:subnets:some-subnet:$ACCOUNT_NAME:$REGION" + def googleSubnet = new GoogleSubnet(selfLink: "projects/$PROJECT_NAME/regions/$REGION/subnetworks/some-subnet") + + def healthChecks = Mock(Compute.RegionHealthChecks) + def healthChecksList = Mock(Compute.RegionHealthChecks.List) + def healthCheckListReal = new HealthCheckList(items: []) + def healthChecksInsert = Mock(Compute.RegionHealthChecks.Insert) + def healthChecksInsertOp = new Operation( + targetLink: "health-check", + name: HEALTH_CHECK_OP_NAME, + status: DONE) + + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesList = Mock(Compute.RegionBackendServices.List) + def bsListReal = new BackendServiceList(items: []) + def backendServicesInsert = Mock(Compute.RegionBackendServices.Insert) + def backendServicesInsertOp = new Operation( + targetLink: "backend-service", + name: BACKEND_SERVICE_OP_NAME, + status: DONE) + + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsGet = Mock(Compute.RegionUrlMaps.Get) + def urlMapsInsert = Mock(Compute.RegionUrlMaps.Insert) + def urlMapsInsertOp = new Operation( + targetLink: "url-map", + name: URL_MAP_OP_NAME, + status: DONE) + + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesInsert = Mock(Compute.RegionTargetHttpProxies.Insert) + def targetHttpProxiesInsertOp = new Operation( + targetLink: "target-proxy", + name: TARGET_HTTP_PROXY_OP_NAME, + status: DONE) + + def forwardingRules = Mock(Compute.ForwardingRules) + def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert) + def forwardingRulesGet = 
Mock(Compute.ForwardingRules.Get) + def forwardingRuleInsertOp = new Operation( + targetLink: "forwarding-rule", + name: LOAD_BALANCER_NAME, + status: DONE) + + def input = [ + accountName : ACCOUNT_NAME, + "loadBalancerName": LOAD_BALANCER_NAME, + "portRange" : PORT_RANGE, + "region" : REGION, + "network" : "some-network", + "subnet" : "some-subnet", + "defaultService" : [ + "name" : DEFAULT_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ], + "certificate" : "", + "hostRules" : [ + [ + "hostPatterns": [ + "host1.com", + "host2.com" + ], + "pathMatcher" : [ + "pathRules" : [ + [ + "paths" : [ + "/path", + "/path2/more" + ], + "backendService": [ + "name" : PM_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ] + ] + ], + "defaultService": [ + "name" : DEFAULT_PM_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ] + ] + ] + ] + ] + def description = converter.convertDescription(input) + @Subject def operation = new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.googleNetworkProvider = googleNetworkProviderMock + operation.googleSubnetProvider = googleSubnetProviderMock + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + + 1 * googleNetworkProviderMock.getAllMatchingKeyPattern(networkKeyPattern) >> [googleNetwork] + 1 * googleSubnetProviderMock.getAllMatchingKeyPattern(subnetKeyPattern) >> [googleSubnet] + + 2 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.list(PROJECT_NAME, REGION) >> healthChecksList + 1 * healthChecksList.execute() >> healthCheckListReal + 1 * healthChecks.insert(PROJECT_NAME, REGION, _) >> healthChecksInsert + 1 * healthChecksInsert.execute() >> healthChecksInsertOp + + 4 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.list(PROJECT_NAME, REGION) >> backendServicesList + 1 * backendServicesList.execute() >> bsListReal + 3 * backendServices.insert(PROJECT_NAME, REGION, _) >> backendServicesInsert + 3 * backendServicesInsert.execute() >> backendServicesInsertOp + + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.get(PROJECT_NAME, REGION, description.loadBalancerName) >> urlMapsGet + 1 * urlMapsGet.execute() >> null + 1 * urlMaps.insert(PROJECT_NAME, REGION, _) >> urlMapsInsert + 1 * urlMapsInsert.execute() >> urlMapsInsertOp + + 1 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 1 * targetHttpProxies.insert(PROJECT_NAME, REGION, {it.urlMap == urlMapsInsertOp.targetLink}) >> targetHttpProxiesInsert + 1 * targetHttpProxiesInsert.execute() >> targetHttpProxiesInsertOp + + 2 * computeMock.forwardingRules() >> forwardingRules + 1 * forwardingRules.insert(PROJECT_NAME, REGION, _) >> forwardingRulesInsert + 1 * forwardingRules.get(PROJECT_NAME, REGION, _) >> forwardingRulesGet + 1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp + 1 * forwardingRulesGet.execute() >> null + + 7 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet + 1 * healthCheckOperationGet.execute() >> healthChecksInsertOp + 3 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet + 3 * 
backendServiceOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_OP_NAME) >> urlMapOperationGet + 1 * urlMapOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_OP_NAME) >> targetHttpProxyOperationGet + 1 * targetHttpProxyOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, LOAD_BALANCER_NAME) >> forwardingRuleOperationGet + 1 * forwardingRuleOperationGet.execute() >> forwardingRuleInsertOp + } + + void "should create an Internal HTTP Load Balancer with minimal description"() { + setup: + def computeMock = Mock(Compute) + + def credentialsRepo = new MapBackedAccountCredentialsRepository() + def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() + credentialsRepo.save(ACCOUNT_NAME, credentials) + def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( + accountCredentialsProvider: credentialsProvider, + objectMapper: new ObjectMapper() + ) + + def regionOperations = Mock(Compute.RegionOperations) + def healthCheckOperationGet = Mock(Compute.RegionOperations.Get) + def backendServiceOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapOperationGet = Mock(Compute.RegionOperations.Get) + def targetHttpProxyOperationGet = Mock(Compute.RegionOperations.Get) + def forwardingRuleOperationGet = Mock(Compute.RegionOperations.Get) + + def googleNetworkProviderMock = Mock(GoogleNetworkProvider) + def networkKeyPattern = "gce:networks:some-network:$ACCOUNT_NAME:global" + def googleNetwork = new GoogleNetwork(selfLink: "projects/$PROJECT_NAME/global/networks/some-network") + + def googleSubnetProviderMock = Mock(GoogleSubnetProvider) + def subnetKeyPattern = "gce:subnets:some-subnet:$ACCOUNT_NAME:$REGION" + def googleSubnet = new GoogleSubnet(selfLink: "projects/$PROJECT_NAME/regions/$REGION/subnetworks/some-subnet") + + def healthChecks = Mock(Compute.RegionHealthChecks) + def healthChecksList = Mock(Compute.RegionHealthChecks.List) + def healthCheckListReal = new HealthCheckList(items: []) + def healthChecksInsert = Mock(Compute.RegionHealthChecks.Insert) + def healthChecksInsertOp = new Operation( + targetLink: "health-check", + name: HEALTH_CHECK_OP_NAME, + status: DONE) + + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesList = Mock(Compute.RegionBackendServices.List) + def bsListReal = new BackendServiceList(items: []) + def backendServicesInsert = Mock(Compute.RegionBackendServices.Insert) + def backendServicesInsertOp = new Operation( + targetLink: "backend-service", + name: BACKEND_SERVICE_OP_NAME, + status: DONE) + + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsGet = Mock(Compute.RegionUrlMaps.Get) + def urlMapsInsert = Mock(Compute.RegionUrlMaps.Insert) + def urlMapsInsertOp = new Operation( + targetLink: "url-map", + name: URL_MAP_OP_NAME, + status: DONE) + + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesInsert = Mock(Compute.RegionTargetHttpProxies.Insert) + def targetHttpProxiesInsertOp = new Operation( + targetLink: "target-proxy", + name: TARGET_HTTP_PROXY_OP_NAME, + status: DONE) + + def forwardingRules = Mock(Compute.ForwardingRules) + def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert) 
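+    // Forwarding-rule Get mock; the 'then:' block stubs its execute() to return null, i.e. no forwarding rule exists yet, so the operation is expected to insert a new one.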
+ def forwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRuleInsertOp = new Operation( + targetLink: "forwarding-rule", + name: LOAD_BALANCER_NAME, + status: DONE) + def input = [ + accountName : ACCOUNT_NAME, + "loadBalancerName": LOAD_BALANCER_NAME, + "portRange" : PORT_RANGE, + "region" : REGION, + "defaultService" : [ + "name" : DEFAULT_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ], + "certificate" : "", + "hostRules" : null, + "network" : "some-network", + "subnet" : "some-subnet", + ] + def description = converter.convertDescription(input) + @Subject def operation = new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + + operation.googleNetworkProvider = googleNetworkProviderMock + operation.googleSubnetProvider = googleSubnetProviderMock + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + 1 * googleNetworkProviderMock.getAllMatchingKeyPattern(networkKeyPattern) >> [googleNetwork] + 1 * googleSubnetProviderMock.getAllMatchingKeyPattern(subnetKeyPattern) >> [googleSubnet] + + 2 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.list(PROJECT_NAME, REGION) >> healthChecksList + 1 * healthChecksList.execute() >> healthCheckListReal + 1 * healthChecks.insert(PROJECT_NAME, REGION, _) >> healthChecksInsert + 1 * healthChecksInsert.execute() >> healthChecksInsertOp + + 2 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.list(PROJECT_NAME, REGION) >> backendServicesList + 1 * backendServicesList.execute() >> bsListReal + 1 * backendServices.insert(PROJECT_NAME, REGION, _) >> backendServicesInsert + 1 * backendServicesInsert.execute() >> backendServicesInsertOp + + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.get(PROJECT_NAME, REGION, description.loadBalancerName) >> urlMapsGet + 1 * urlMapsGet.execute() >> null + 1 * urlMaps.insert(PROJECT_NAME, REGION, _) >> urlMapsInsert + 1 * urlMapsInsert.execute() >> urlMapsInsertOp + + 1 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 1 * targetHttpProxies.insert(PROJECT_NAME, REGION, {it.urlMap == urlMapsInsertOp.targetLink}) >> targetHttpProxiesInsert + 1 * targetHttpProxiesInsert.execute() >> targetHttpProxiesInsertOp + + 2 * computeMock.forwardingRules() >> forwardingRules + 1 * forwardingRules.insert(PROJECT_NAME, REGION, _) >> forwardingRulesInsert + 1 * forwardingRules.get(PROJECT_NAME, REGION, _) >> forwardingRulesGet + 1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp + 1 * forwardingRulesGet.execute() >> null + + 5 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet + 1 * healthCheckOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet + 1 * backendServiceOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_OP_NAME) >> urlMapOperationGet + 1 * urlMapOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_OP_NAME) >> targetHttpProxyOperationGet + 1 * targetHttpProxyOperationGet.execute() >> healthChecksInsertOp + 
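+      // The forwarding-rule insert is polled under LOAD_BALANCER_NAME because forwardingRuleInsertOp above uses the load balancer name as its operation name.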
1 * regionOperations.get(PROJECT_NAME, REGION, LOAD_BALANCER_NAME) >> forwardingRuleOperationGet + 1 * forwardingRuleOperationGet.execute() >> forwardingRuleInsertOp + } + + void "should create an Internal HTTPS Load Balancer when certificate specified"() { + setup: + def computeMock = Mock(Compute) + + def credentialsRepo = new MapBackedAccountCredentialsRepository() + def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() + credentialsRepo.save(ACCOUNT_NAME, credentials) + def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( + accountCredentialsProvider: credentialsProvider, + objectMapper: new ObjectMapper() + ) + + def regionOperations = Mock(Compute.RegionOperations) + def healthCheckOperationGet = Mock(Compute.RegionOperations.Get) + def backendServiceOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapOperationGet = Mock(Compute.RegionOperations.Get) + def targetHttpsProxyOperationGet = Mock(Compute.RegionOperations.Get) + def forwardingRuleOperationGet = Mock(Compute.RegionOperations.Get) + + def googleNetworkProviderMock = Mock(GoogleNetworkProvider) + def networkKeyPattern = "gce:networks:some-network:$ACCOUNT_NAME:global" + def googleNetwork = new GoogleNetwork(selfLink: "projects/$PROJECT_NAME/global/networks/some-network") + + def googleSubnetProviderMock = Mock(GoogleSubnetProvider) + def subnetKeyPattern = "gce:subnets:some-subnet:$ACCOUNT_NAME:$REGION" + def googleSubnet = new GoogleSubnet(selfLink: "projects/$PROJECT_NAME/regions/$REGION/subnetworks/some-subnet") + + def healthChecks = Mock(Compute.RegionHealthChecks) + def healthChecksList = Mock(Compute.RegionHealthChecks.List) + def healthCheckListReal = new HealthCheckList(items: []) + def healthChecksInsert = Mock(Compute.RegionHealthChecks.Insert) + def healthChecksInsertOp = new Operation( + targetLink: "health-check", + name: HEALTH_CHECK_OP_NAME, + status: DONE) + + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesList = Mock(Compute.RegionBackendServices.List) + def bsListReal = new BackendServiceList(items: []) + def backendServicesInsert = Mock(Compute.RegionBackendServices.Insert) + def backendServicesInsertOp = new Operation( + targetLink: "backend-service", + name: BACKEND_SERVICE_OP_NAME, + status: DONE) + + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsGet = Mock(Compute.RegionUrlMaps.Get) + def urlMapsInsert = Mock(Compute.RegionUrlMaps.Insert) + def urlMapsInsertOp = new Operation( + targetLink: "url-map", + name: URL_MAP_OP_NAME, + status: DONE) + + def targetHttpsProxies = Mock(Compute.RegionTargetHttpsProxies) + def targetHttpsProxiesInsert = Mock(Compute.RegionTargetHttpsProxies.Insert) + def targetHttpsProxiesInsertOp = new Operation( + targetLink: "target-proxy", + name: TARGET_HTTP_PROXY_OP_NAME, + status: DONE) + + def forwardingRules = Mock(Compute.ForwardingRules) + def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert) + def forwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRuleInsertOp = new Operation( + targetLink: "forwarding-rule", + name: LOAD_BALANCER_NAME, + status: DONE) + def input = [ + accountName : ACCOUNT_NAME, + "loadBalancerName": LOAD_BALANCER_NAME, + "certificate" : "my-cert", + "portRange" : PORT_RANGE, + "region" : REGION, + "defaultService" : [ + 
"name" : DEFAULT_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ], + "hostRules" : null, + "network" : "some-network", + "subnet" : "some-subnet", + ] + def description = converter.convertDescription(input) + @Subject def operation = new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + + operation.googleNetworkProvider = googleNetworkProviderMock + operation.googleSubnetProvider = googleSubnetProviderMock + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + + 1 * googleNetworkProviderMock.getAllMatchingKeyPattern(networkKeyPattern) >> [googleNetwork] + 1 * googleSubnetProviderMock.getAllMatchingKeyPattern(subnetKeyPattern) >> [googleSubnet] + 2 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.list(PROJECT_NAME, REGION) >> healthChecksList + 1 * healthChecksList.execute() >> healthCheckListReal + 1 * healthChecks.insert(PROJECT_NAME, REGION, _) >> healthChecksInsert + 1 * healthChecksInsert.execute() >> healthChecksInsertOp + + 2 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.list(PROJECT_NAME, REGION) >> backendServicesList + 1 * backendServicesList.execute() >> bsListReal + 1 * backendServices.insert(PROJECT_NAME, REGION, _) >> backendServicesInsert + 1 * backendServicesInsert.execute() >> backendServicesInsertOp + + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.get(PROJECT_NAME, REGION, description.loadBalancerName) >> urlMapsGet + 1 * urlMapsGet.execute() >> null + 1 * urlMaps.insert(PROJECT_NAME, REGION, _) >> urlMapsInsert + 1 * urlMapsInsert.execute() >> urlMapsInsertOp + + 1 * computeMock.regionTargetHttpsProxies() >> targetHttpsProxies + 1 * targetHttpsProxies.insert(PROJECT_NAME, REGION, {it.urlMap == urlMapsInsertOp.targetLink}) >> targetHttpsProxiesInsert + 1 * targetHttpsProxiesInsert.execute() >> targetHttpsProxiesInsertOp + + 2 * computeMock.forwardingRules() >> forwardingRules + 1 * forwardingRules.insert(PROJECT_NAME, REGION, _) >> forwardingRulesInsert + 1 * forwardingRules.get(PROJECT_NAME, REGION, _) >> forwardingRulesGet + 1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp + 1 * forwardingRulesGet.execute() >> null + + 5 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet + 1 * healthCheckOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet + 1 * backendServiceOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_OP_NAME) >> urlMapOperationGet + 1 * urlMapOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_OP_NAME) >> targetHttpsProxyOperationGet + 1 * targetHttpsProxyOperationGet.execute() >> healthChecksInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, LOAD_BALANCER_NAME) >> forwardingRuleOperationGet + 1 * forwardingRuleOperationGet.execute() >> forwardingRuleInsertOp + } + + void "should update health check when it exists and needs updated"() { + setup: + def computeMock = Mock(Compute) + + def credentialsRepo = new MapBackedAccountCredentialsRepository() + 
def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() + credentialsRepo.save(ACCOUNT_NAME, credentials) + def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( + accountCredentialsProvider: credentialsProvider, + objectMapper: new ObjectMapper() + ) + + def regionOperations = Mock(Compute.RegionOperations) + def healthCheckOperationGet = Mock(Compute.RegionOperations.Get) + def backendServiceOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapOperationGet = Mock(Compute.RegionOperations.Get) + def targetHttpProxyOperationGet = Mock(Compute.RegionOperations.Get) + def forwardingRuleOperationGet = Mock(Compute.RegionOperations.Get) + + def googleNetworkProviderMock = Mock(GoogleNetworkProvider) + def networkKeyPattern = "gce:networks:some-network:$ACCOUNT_NAME:global" + def googleNetwork = new GoogleNetwork(selfLink: "projects/$PROJECT_NAME/global/networks/some-network") + + def googleSubnetProviderMock = Mock(GoogleSubnetProvider) + def subnetKeyPattern = "gce:subnets:some-subnet:$ACCOUNT_NAME:$REGION" + def googleSubnet = new GoogleSubnet(selfLink: "projects/$PROJECT_NAME/regions/$REGION/subnetworks/some-subnet") + + def healthChecks = Mock(Compute.RegionHealthChecks) + def healthChecksList = Mock(Compute.RegionHealthChecks.List) + def healthCheckListReal = new HealthCheckList(items: []) + def healthChecksInsert = Mock(Compute.RegionHealthChecks.Insert) + def healthChecksInsertOp = new Operation( + targetLink: "health-check", + name: HEALTH_CHECK_OP_NAME, + status: DONE) + def healthChecksUpdateOp = new Operation( + targetLink: "health-check", + name: HEALTH_CHECK_OP_NAME, + status: DONE) + + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesList = Mock(Compute.RegionBackendServices.List) + def bsListReal = new BackendServiceList(items: []) + def backendServicesInsert = Mock(Compute.RegionBackendServices.Insert) + def backendServicesInsertOp = new Operation( + targetLink: "backend-service", + name: BACKEND_SERVICE_OP_NAME, + status: DONE) + + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsGet = Mock(Compute.RegionUrlMaps.Get) + def urlMapsInsert = Mock(Compute.RegionUrlMaps.Insert) + def urlMapsInsertOp = new Operation( + targetLink: "url-map", + name: URL_MAP_OP_NAME, + status: DONE) + + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesInsert = Mock(Compute.RegionTargetHttpProxies.Insert) + def targetHttpProxiesInsertOp = new Operation( + targetLink: "target-proxy", + name: TARGET_HTTP_PROXY_OP_NAME, + status: DONE) + + def forwardingRules = Mock(Compute.ForwardingRules) + def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert) + def forwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRuleInsertOp = new Operation( + targetLink: "forwarding-rule", + name: LOAD_BALANCER_NAME, + status: DONE) + + def input = [ + accountName : ACCOUNT_NAME, + "loadBalancerName": LOAD_BALANCER_NAME, + "portRange" : PORT_RANGE, + "region" : REGION, + "defaultService" : [ + "name" : DEFAULT_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ], + "certificate" : "", + "hostRules" : [ + [ + "hostPatterns": [ + "host1.com", + "host2.com" + ], + "pathMatcher" : [ + "pathRules" : [ + [ + "paths" : [ + "/path", + "/path2/more" + ], + 
"backendService": [ + "name" : PM_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ] + ] + ], + "defaultService": [ + "name" : DEFAULT_PM_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ] + ] + ] + ], + "network" : "some-network", + "subnet" : "some-subnet", + ] + def description = converter.convertDescription(input) + @Subject def operation = new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + + operation.googleNetworkProvider = googleNetworkProviderMock + operation.googleSubnetProvider = googleSubnetProviderMock + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + + 1 * googleNetworkProviderMock.getAllMatchingKeyPattern(networkKeyPattern) >> [googleNetwork] + 1 * googleSubnetProviderMock.getAllMatchingKeyPattern(subnetKeyPattern) >> [googleSubnet] + 2 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.list(PROJECT_NAME, REGION) >> healthChecksList + 1 * healthChecksList.execute() >> healthCheckListReal + 1 * healthChecks.insert(PROJECT_NAME, REGION, _) >> healthChecksInsert + 1 * healthChecksInsert.execute() >> healthChecksInsertOp + + 4 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.list(PROJECT_NAME, REGION) >> backendServicesList + 1 * backendServicesList.execute() >> bsListReal + 3 * backendServices.insert(PROJECT_NAME, REGION, _) >> backendServicesInsert + 3 * backendServicesInsert.execute() >> backendServicesInsertOp + + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.get(PROJECT_NAME, REGION, description.loadBalancerName) >> urlMapsGet + 1 * urlMapsGet.execute() >> null + 1 * urlMaps.insert(PROJECT_NAME, REGION, _) >> urlMapsInsert + 1 * urlMapsInsert.execute() >> urlMapsInsertOp + + 1 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 1 * targetHttpProxies.insert(PROJECT_NAME, REGION, {it.urlMap == urlMapsInsertOp.targetLink}) >> targetHttpProxiesInsert + 1 * targetHttpProxiesInsert.execute() >> targetHttpProxiesInsertOp + + 2 * computeMock.forwardingRules() >> forwardingRules + 1 * forwardingRules.insert(PROJECT_NAME, REGION, _) >> forwardingRulesInsert + 1 * forwardingRules.get(PROJECT_NAME, REGION, _) >> forwardingRulesGet + 1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp + 1 * forwardingRulesGet.execute() >> null + + 7 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet + 1 * healthCheckOperationGet.execute() >> healthChecksUpdateOp + 3 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet + 3 * backendServiceOperationGet.execute() >> backendServicesInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_OP_NAME) >> urlMapOperationGet + 1 * urlMapOperationGet.execute() >> urlMapsInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_OP_NAME) >> targetHttpProxyOperationGet + 1 * targetHttpProxyOperationGet.execute() >> targetHttpProxiesInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, LOAD_BALANCER_NAME) >> forwardingRuleOperationGet + 1 * forwardingRuleOperationGet.execute() >> forwardingRuleInsertOp + } + + void "should update backend service if it exists 
and needs updated"() { + setup: + def computeMock = Mock(Compute) + + def credentialsRepo = new MapBackedAccountCredentialsRepository() + def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() + credentialsRepo.save(ACCOUNT_NAME, credentials) + def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( + accountCredentialsProvider: credentialsProvider, + objectMapper: new ObjectMapper() + ) + + def regionOperations = Mock(Compute.RegionOperations) + def healthCheckOperationGet = Mock(Compute.RegionOperations.Get) + def backendServiceOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapOperationGet = Mock(Compute.RegionOperations.Get) + def targetHttpProxyOperationGet = Mock(Compute.RegionOperations.Get) + def forwardingRuleOperationGet = Mock(Compute.RegionOperations.Get) + + def googleNetworkProviderMock = Mock(GoogleNetworkProvider) + def networkKeyPattern = "gce:networks:some-network:$ACCOUNT_NAME:global" + def googleNetwork = new GoogleNetwork(selfLink: "projects/$PROJECT_NAME/global/networks/some-network") + + def googleSubnetProviderMock = Mock(GoogleSubnetProvider) + def subnetKeyPattern = "gce:subnets:some-subnet:$ACCOUNT_NAME:$REGION" + def googleSubnet = new GoogleSubnet(selfLink: "projects/$PROJECT_NAME/regions/$REGION/subnetworks/some-subnet") + + def healthChecks = Mock(Compute.RegionHealthChecks) + def healthChecksList = Mock(Compute.RegionHealthChecks.List) + def healthCheckListReal = new HealthCheckList(items: []) + def healthChecksInsert = Mock(Compute.RegionHealthChecks.Insert) + def healthChecksInsertOp = new Operation( + targetLink: "health-check", + name: HEALTH_CHECK_OP_NAME, + status: DONE) + def healthChecksUpdateOp = new Operation( + targetLink: "health-check", + name: HEALTH_CHECK_OP_NAME, + status: DONE) + + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesList = Mock(Compute.RegionBackendServices.List) + def bsListReal = new BackendServiceList(items: [new BackendService(name: PM_SERVICE, sessionAffinity: 'NONE')]) + def backendServicesInsert = Mock(Compute.RegionBackendServices.Insert) + def backendServicesInsertOp = new Operation( + targetLink: "backend-service", + name: BACKEND_SERVICE_OP_NAME, + status: DONE) + def backendServicesUpdate = Mock(Compute.RegionBackendServices.Update) + def backendServicesUpdateOp = new Operation( + targetLink: "backend-service", + name: BACKEND_SERVICE_OP_NAME + "update", + status: DONE) + + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsGet = Mock(Compute.RegionUrlMaps.Get) + def urlMapsInsert = Mock(Compute.RegionUrlMaps.Insert) + def urlMapsInsertOp = new Operation( + targetLink: "url-map", + name: URL_MAP_OP_NAME, + status: DONE) + + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesInsert = Mock(Compute.RegionTargetHttpProxies.Insert) + def targetHttpProxiesInsertOp = new Operation( + targetLink: "target-proxy", + name: TARGET_HTTP_PROXY_OP_NAME, + status: DONE) + + def forwardingRules = Mock(Compute.ForwardingRules) + def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert) + def forwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRuleInsertOp = new Operation( + targetLink: "forwarding-rule", + name: LOAD_BALANCER_NAME, + status: DONE) + + def input = [ + accountName : 
ACCOUNT_NAME, + "loadBalancerName": LOAD_BALANCER_NAME, + "portRange" : PORT_RANGE, + "region" : REGION, + "defaultService" : [ + "name" : DEFAULT_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ], + "certificate" : "", + "hostRules" : [ + [ + "hostPatterns": [ + "host1.com", + "host2.com" + ], + "pathMatcher" : [ + "pathRules" : [ + [ + "paths" : [ + "/path", + "/path2/more" + ], + "backendService": [ + "name" : PM_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ] + ] + ], + "defaultService": [ + "name" : DEFAULT_PM_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ] + ] + ] + ], + "network" : "some-network", + "subnet" : "some-subnet", + ] + def description = converter.convertDescription(input) + @Subject def operation = new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.googleNetworkProvider = googleNetworkProviderMock + operation.googleSubnetProvider = googleSubnetProviderMock + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + + 1 * googleNetworkProviderMock.getAllMatchingKeyPattern(networkKeyPattern) >> [googleNetwork] + 1 * googleSubnetProviderMock.getAllMatchingKeyPattern(subnetKeyPattern) >> [googleSubnet] + 2 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.list(PROJECT_NAME, REGION) >> healthChecksList + 1 * healthChecksList.execute() >> healthCheckListReal + 1 * healthChecks.insert(PROJECT_NAME, REGION, _) >> healthChecksInsert + 1 * healthChecksInsert.execute() >> healthChecksInsertOp + + 4 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.list(PROJECT_NAME, REGION) >> backendServicesList + 1 * backendServicesList.execute() >> bsListReal + 2 * backendServices.insert(PROJECT_NAME, REGION, _) >> backendServicesInsert + 2 * backendServicesInsert.execute() >> backendServicesInsertOp + 1 * backendServices.update(PROJECT_NAME, REGION, PM_SERVICE, _) >> backendServicesUpdate + 1 * backendServicesUpdate.execute() >> backendServicesUpdateOp + + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.get(PROJECT_NAME, REGION, description.loadBalancerName) >> urlMapsGet + 1 * urlMapsGet.execute() >> null + 1 * urlMaps.insert(PROJECT_NAME, REGION, _) >> urlMapsInsert + 1 * urlMapsInsert.execute() >> urlMapsInsertOp + + 1 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 1 * targetHttpProxies.insert(PROJECT_NAME, REGION, {it.urlMap == urlMapsInsertOp.targetLink}) >> targetHttpProxiesInsert + 1 * targetHttpProxiesInsert.execute() >> targetHttpProxiesInsertOp + + 2 * computeMock.forwardingRules() >> forwardingRules + 1 * forwardingRules.insert(PROJECT_NAME, REGION, _) >> forwardingRulesInsert + 1 * forwardingRules.get(PROJECT_NAME, REGION, _) >> forwardingRulesGet + 1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp + 1 * forwardingRulesGet.execute() >> null + + 7 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet + 1 * healthCheckOperationGet.execute() >> healthChecksUpdateOp + 2 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet + 2 * backendServiceOperationGet.execute() >> 
backendServicesInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME + "update") >> backendServiceOperationGet + 1 * backendServiceOperationGet.execute() >> backendServicesUpdateOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_OP_NAME) >> urlMapOperationGet + 1 * urlMapOperationGet.execute() >> urlMapsInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_OP_NAME) >> targetHttpProxyOperationGet + 1 * targetHttpProxyOperationGet.execute() >> targetHttpProxiesInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, LOAD_BALANCER_NAME) >> forwardingRuleOperationGet + 1 * forwardingRuleOperationGet.execute() >> forwardingRuleInsertOp + } + + void "should update url map if it exists and needs updated"() { + setup: + def computeMock = Mock(Compute) + + def credentialsRepo = new MapBackedAccountCredentialsRepository() + def credentialsProvider = new DefaultAccountCredentialsProvider(credentialsRepo) + def credentials = new GoogleNamedAccountCredentials.Builder().name(ACCOUNT_NAME).project(PROJECT_NAME).applicationName("my-application").compute(computeMock).credentials(new FakeGoogleCredentials()).build() + credentialsRepo.save(ACCOUNT_NAME, credentials) + def converter = new UpsertGoogleLoadBalancerAtomicOperationConverter( + accountCredentialsProvider: credentialsProvider, + objectMapper: new ObjectMapper() + ) + + def regionOperations = Mock(Compute.RegionOperations) + def healthCheckOperationGet = Mock(Compute.RegionOperations.Get) + def backendServiceOperationGet = Mock(Compute.RegionOperations.Get) + def urlMapOperationGet = Mock(Compute.RegionOperations.Get) + def targetHttpProxyOperationGet = Mock(Compute.RegionOperations.Get) + def forwardingRuleOperationGet = Mock(Compute.RegionOperations.Get) + + def googleNetworkProviderMock = Mock(GoogleNetworkProvider) + def networkKeyPattern = "gce:networks:some-network:$ACCOUNT_NAME:global" + def googleNetwork = new GoogleNetwork(selfLink: "projects/$PROJECT_NAME/global/networks/some-network") + + def googleSubnetProviderMock = Mock(GoogleSubnetProvider) + def subnetKeyPattern = "gce:subnets:some-subnet:$ACCOUNT_NAME:$REGION" + def googleSubnet = new GoogleSubnet(selfLink: "projects/$PROJECT_NAME/regions/$REGION/subnetworks/some-subnet") + + + def healthChecks = Mock(Compute.RegionHealthChecks) + def healthChecksList = Mock(Compute.RegionHealthChecks.List) + def healthCheckListReal = new HealthCheckList(items: []) + def healthChecksInsert = Mock(Compute.RegionHealthChecks.Insert) + def healthChecksInsertOp = new Operation( + targetLink: "health-check", + name: HEALTH_CHECK_OP_NAME, + status: DONE) + def healthChecksUpdateOp = new Operation( + targetLink: "health-check", + name: HEALTH_CHECK_OP_NAME, + status: DONE) + + def backendServices = Mock(Compute.RegionBackendServices) + def backendServicesList = Mock(Compute.RegionBackendServices.List) + def bsListReal = new BackendServiceList(items: [new BackendService(name: PM_SERVICE, sessionAffinity: 'NONE')]) + def backendServicesInsert = Mock(Compute.RegionBackendServices.Insert) + def backendServicesInsertOp = new Operation( + targetLink: "backend-service", + name: BACKEND_SERVICE_OP_NAME, + status: DONE) + def backendServicesUpdate = Mock(Compute.RegionBackendServices.Update) + def backendServicesUpdateOp = new Operation( + targetLink: "backend-service", + name: BACKEND_SERVICE_OP_NAME + "update", + status: DONE) + + def urlMaps = Mock(Compute.RegionUrlMaps) + def urlMapsGet = Mock(Compute.RegionUrlMaps.Get) + def urlMapReal = new
UrlMap(name: LOAD_BALANCER_NAME) + def urlMapsUpdate = Mock(Compute.RegionUrlMaps.Update) + def urlMapsUpdateOp = new Operation( + targetLink: "url-map", + name: URL_MAP_OP_NAME, + status: DONE) + + def targetHttpProxies = Mock(Compute.RegionTargetHttpProxies) + def targetHttpProxiesInsert = Mock(Compute.RegionTargetHttpProxies.Insert) + def targetHttpProxiesInsertOp = new Operation( + targetLink: "target-proxy", + name: TARGET_HTTP_PROXY_OP_NAME, + status: DONE) + + def forwardingRules = Mock(Compute.ForwardingRules) + def forwardingRulesInsert = Mock(Compute.ForwardingRules.Insert) + def forwardingRulesGet = Mock(Compute.ForwardingRules.Get) + def forwardingRuleInsertOp = new Operation( + targetLink: "forwarding-rule", + name: LOAD_BALANCER_NAME, + status: DONE) + + def input = [ + accountName : ACCOUNT_NAME, + "loadBalancerName": LOAD_BALANCER_NAME, + "portRange" : PORT_RANGE, + "region" : REGION, + "defaultService" : [ + "name" : DEFAULT_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ], + "certificate" : "", + "hostRules" : [ + [ + "hostPatterns": [ + "host1.com", + "host2.com" + ], + "pathMatcher" : [ + "pathRules" : [ + [ + "paths" : [ + "/path", + "/path2/more" + ], + "backendService": [ + "name" : PM_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ] + ] + ], + "defaultService": [ + "name" : DEFAULT_PM_SERVICE, + "backends" : [], + "healthCheck": hc, + "sessionAffinity": "NONE", + ] + ] + ] + ], + "network" : "some-network", + "subnet" : "some-subnet", + ] + def description = converter.convertDescription(input) + @Subject def operation = new UpsertGoogleInternalHttpLoadBalancerAtomicOperation(description) + operation.googleOperationPoller = + new GoogleOperationPoller( + googleConfigurationProperties: new GoogleConfigurationProperties(), + threadSleeper: threadSleeperMock, + registry: registry, + safeRetry: safeRetry + ) + operation.googleNetworkProvider = googleNetworkProviderMock + operation.googleSubnetProvider = googleSubnetProviderMock + operation.registry = registry + operation.safeRetry = safeRetry + + when: + operation.operate([]) + + then: + + 1 * googleNetworkProviderMock.getAllMatchingKeyPattern(networkKeyPattern) >> [googleNetwork] + 1 * googleSubnetProviderMock.getAllMatchingKeyPattern(subnetKeyPattern) >> [googleSubnet] + 2 * computeMock.regionHealthChecks() >> healthChecks + 1 * healthChecks.list(PROJECT_NAME, REGION) >> healthChecksList + 1 * healthChecksList.execute() >> healthCheckListReal + 1 * healthChecks.insert(PROJECT_NAME, REGION, _) >> healthChecksInsert + 1 * healthChecksInsert.execute() >> healthChecksInsertOp + + 4 * computeMock.regionBackendServices() >> backendServices + 1 * backendServices.list(PROJECT_NAME, REGION) >> backendServicesList + 1 * backendServicesList.execute() >> bsListReal + 2 * backendServices.insert(PROJECT_NAME, REGION, _) >> backendServicesInsert + 2 * backendServicesInsert.execute() >> backendServicesInsertOp + 1 * backendServices.update(PROJECT_NAME, REGION, PM_SERVICE, _) >> backendServicesUpdate + 1 * backendServicesUpdate.execute() >> backendServicesUpdateOp + + 2 * computeMock.regionUrlMaps() >> urlMaps + 1 * urlMaps.get(PROJECT_NAME, REGION, description.loadBalancerName) >> urlMapsGet + 1 * urlMapsGet.execute() >> urlMapReal + 1 * urlMaps.update(PROJECT_NAME, REGION, LOAD_BALANCER_NAME, _) >> urlMapsUpdate + 1 * urlMapsUpdate.execute() >> urlMapsUpdateOp + + 1 * computeMock.regionTargetHttpProxies() >> targetHttpProxies + 1 * 
targetHttpProxies.insert(PROJECT_NAME, REGION, {it.urlMap == urlMapsUpdateOp.targetLink}) >> targetHttpProxiesInsert + 1 * targetHttpProxiesInsert.execute() >> targetHttpProxiesInsertOp + + 2 * computeMock.forwardingRules() >> forwardingRules + 1 * forwardingRules.insert(PROJECT_NAME, REGION, _) >> forwardingRulesInsert + 1 * forwardingRules.get(PROJECT_NAME, REGION, _) >> forwardingRulesGet + 1 * forwardingRulesInsert.execute() >> forwardingRuleInsertOp + 1 * forwardingRulesGet.execute() >> null + + 7 * computeMock.regionOperations() >> regionOperations + 1 * regionOperations.get(PROJECT_NAME, REGION, HEALTH_CHECK_OP_NAME) >> healthCheckOperationGet + 1 * healthCheckOperationGet.execute() >> healthChecksUpdateOp + 2 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME) >> backendServiceOperationGet + 2 * backendServiceOperationGet.execute() >> backendServicesInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, BACKEND_SERVICE_OP_NAME + "update") >> backendServiceOperationGet + 1 * backendServiceOperationGet.execute() >> backendServicesUpdateOp + 1 * regionOperations.get(PROJECT_NAME, REGION, URL_MAP_OP_NAME) >> urlMapOperationGet + 1 * urlMapOperationGet.execute() >> urlMapsUpdateOp + 1 * regionOperations.get(PROJECT_NAME, REGION, TARGET_HTTP_PROXY_OP_NAME) >> targetHttpProxyOperationGet + 1 * targetHttpProxyOperationGet.execute() >> targetHttpProxiesInsertOp + 1 * regionOperations.get(PROJECT_NAME, REGION, LOAD_BALANCER_NAME) >> forwardingRuleOperationGet + 1 * forwardingRuleOperationGet.execute() >> forwardingRuleInsertOp + } +}