feat(provider/google): GCE Autoscaler Support for Clouddriver. (#5748)
sanopsmx committed Aug 8, 2022
1 parent 287acf3 · commit 6e3cd55
Showing 14 changed files with 345 additions and 187 deletions.
5 changes: 4 additions & 1 deletion clouddriver-google/clouddriver-google.gradle
@@ -13,7 +13,10 @@ dependencies {

   implementation "org.codehaus.groovy:groovy-all"
   implementation "org.apache.commons:commons-lang3"
-  implementation "com.google.apis:google-api-services-compute"
+  implementation ("com.google.apis:google-api-services-compute:beta-rev20201102-1.30.10") {
+    force = true
+  }
   implementation "com.google.guava:guava"
   implementation "com.google.apis:google-api-services-iam"
   implementation 'com.google.auth:google-auth-library-oauth2-http'
   implementation "com.netflix.frigga:frigga"
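Note: the hunk pins the beta revision of the Compute client, presumably because the scaling-schedule fields live in the beta API surface at this revision. `force = true` on a single dependency is Gradle's legacy pinning mechanism; a rough equivalent using `resolutionStrategy`, shown here only as an illustrative sketch and not part of the commit, would be:

    // Illustrative alternative (not in this commit): pin the beta Compute
    // client for every configuration via a resolution strategy.
    configurations.all {
      resolutionStrategy {
        force "com.google.apis:google-api-services-compute:beta-rev20201102-1.30.10"
      }
    }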

Large diffs are not rendered by default.

@@ -645,7 +645,7 @@ class BasicGoogleDeployHandler implements DeployHandler<BasicGoogleDeployDescription

   private boolean autoscalerIsSpecified(BasicGoogleDeployDescription description) {
     return description.autoscalingPolicy?.with {
-      cpuUtilization || loadBalancingUtilization || customMetricUtilizations
+      cpuUtilization || loadBalancingUtilization || customMetricUtilizations || scalingSchedules
     }
   }
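With `scalingSchedules` added to this check, a deploy description whose policy carries only schedules, and no CPU, load-balancing, or custom-metric target, now counts as "autoscaler specified". A hedged sketch of such a description fragment, using plain maps rather than the real description classes; the field names are assumptions based on the mappings elsewhere in this diff:

    def description = [
      autoscalingPolicy: [
        minNumReplicas  : 1,
        maxNumReplicas  : 10,
        scalingSchedules: [[
          scalingSchedule    : "weekday-scale-out",   // schedule name
          scheduleCron       : "0 7 * * MON-FRI",     // 5-field unix cron
          timeZone           : "America/Los_Angeles",
          durationSec        : 3600,
          minRequiredReplicas: 5
        ]]
      ]
    ]
    // autoscalerIsSpecified(...) is now true for this policy even though
    // no utilization target is set.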
@@ -103,44 +103,44 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation
       task.updateStatus BASE_PHASE, "Updating autoscaler for $serverGroupName..."

       autoscaler = GCEUtil.buildAutoscaler(serverGroupName,
-        serverGroup.selfLink,
-        copyAndOverrideAncestorAutoscalingPolicy(ancestorAutoscalingPolicyDescription,
-          description.autoscalingPolicy))
+          serverGroup.selfLink,
+          copyAndOverrideAncestorAutoscalingPolicy(ancestorAutoscalingPolicyDescription,
+            description.autoscalingPolicy))

       if (isRegional) {
         def updateOp = timeExecute(
-          compute.regionAutoscalers().update(project, region, autoscaler),
-          "compute.regionAutoscalers.update",
-          TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region)
+            compute.regionAutoscalers().update(project, region, autoscaler),
+            "compute.regionAutoscalers.update",
+            TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region)
         googleOperationPoller.waitForRegionalOperation(compute, project, region,
           updateOp.getName(), null, task, "autoScaler ${autoscaler.getName()} for server group $serverGroupName", BASE_PHASE)
       } else {
         def updateOp = timeExecute(
-          compute.autoscalers().update(project, zone, autoscaler),
-          "compute.autoscalers.update",
-          TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone)
+            compute.autoscalers().update(project, zone, autoscaler),
+            "compute.autoscalers.update",
+            TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone)
         googleOperationPoller.waitForZonalOperation(compute, project, zone,
           updateOp.getName(), null, task, "autoScaler ${autoscaler.getName()} for server group $serverGroupName", BASE_PHASE)
       }
     } else {
       task.updateStatus BASE_PHASE, "Creating new autoscaler for $serverGroupName..."

       autoscaler = GCEUtil.buildAutoscaler(serverGroupName,
-        serverGroup.selfLink,
-        normalizeNewAutoscalingPolicy(description.autoscalingPolicy))
+          serverGroup.selfLink,
+          normalizeNewAutoscalingPolicy(description.autoscalingPolicy))

       if (isRegional) {
         def insertOp = timeExecute(
-          compute.regionAutoscalers().insert(project, region, autoscaler),
-          "compute.regionAutoscalers.insert",
-          TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region)
+            compute.regionAutoscalers().insert(project, region, autoscaler),
+            "compute.regionAutoscalers.insert",
+            TAG_SCOPE, SCOPE_REGIONAL, TAG_REGION, region)
         googleOperationPoller.waitForRegionalOperation(compute, project, region,
           insertOp.getName(), null, task, "autoScaler ${autoscaler.getName()} for server group $serverGroupName", BASE_PHASE)
       } else {
         def insertOp = timeExecute(
-          compute.autoscalers().insert(project, zone, autoscaler),
-          "compute.autoscalers.insert",
-          TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone)
+            compute.autoscalers().insert(project, zone, autoscaler),
+            "compute.autoscalers.insert",
+            TAG_SCOPE, SCOPE_ZONAL, TAG_ZONE, zone)
         googleOperationPoller.waitForZonalOperation(compute, project, zone,
           insertOp.getName(), null, task, "autoScaler ${autoscaler.getName()} for server group $serverGroupName", BASE_PHASE)
       }
@@ -222,7 +222,7 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation
   }

   // Deletes existing customMetricUtilizations if passed an empty array.
-  ["minNumReplicas", "maxNumReplicas", "coolDownPeriodSec", "customMetricUtilizations", "mode"].each {
+  ["minNumReplicas", "maxNumReplicas", "coolDownPeriodSec", "customMetricUtilizations", "mode", "scalingSchedules"].each {
     if (update[it] != null) {
       newDescription[it] = update[it]
     }
@@ -307,13 +307,13 @@ class UpsertGoogleAutoscalingPolicyAtomicOperation extends GoogleAtomicOperation

     List<InstanceGroupManagerAutoHealingPolicy> autoHealingPolicy = autoHealingPolicyDescription?.healthCheck
       ? [new InstanceGroupManagerAutoHealingPolicy(
-        healthCheck: autoHealingHealthCheck.selfLink,
-        initialDelaySec: autoHealingPolicyDescription.initialDelaySec)]
+          healthCheck: autoHealingHealthCheck.selfLink,
+          initialDelaySec: autoHealingPolicyDescription.initialDelaySec)]
       : null

     if (autoHealingPolicy && autoHealingPolicyDescription.maxUnavailable) {
       def maxUnavailable = new FixedOrPercent(fixed: autoHealingPolicyDescription.maxUnavailable.fixed as Integer,
-        percent: autoHealingPolicyDescription.maxUnavailable.percent as Integer)
+          percent: autoHealingPolicyDescription.maxUnavailable.percent as Integer)

       autoHealingPolicy[0].setMaxUnavailable(maxUnavailable)
     }
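The hunk at -222 makes `scalingSchedules` one of the fields that `copyAndOverrideAncestorAutoscalingPolicy` carries from the update onto the ancestor policy, so a schedule-only update no longer drops the ancestor's other settings. A minimal sketch of that merge semantics, assuming plain maps rather than the real description classes:

    def ancestor = [minNumReplicas: 2, maxNumReplicas: 8, mode: "ON"]
    def update   = [scalingSchedules: [[scalingSchedule: "nightly-scale-in"]]]
    // Only non-null update fields override the ancestor:
    def merged = ancestor + update.findAll { k, v -> v != null }
    assert merged.minNumReplicas == 2 && merged.scalingSchedules.size() == 1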
@@ -406,6 +406,14 @@ class SaveSnapshotAtomicOperation implements AtomicOperation<Void> {
     if (autoscalingPolicy.cpuUtilization?.utilizationTarget) {
       autoscalerMap.autoscaling_policy.cpu_utilization = [:]
       autoscalerMap.autoscaling_policy.cpu_utilization.target = autoscalingPolicy.cpuUtilization.utilizationTarget
+      switch (autoscalingPolicy.cpuUtilization.predictiveMethod) {
+        case "NONE":
+          autoscalerMap.autoscaling_policy.cpu_utilization.predictive_method = "none"
+          break
+        case "OPTIMIZE_AVAILABILITY":
+          autoscalerMap.autoscaling_policy.cpu_utilization.predictive_method = "optimize_availability"
+          break
+      }
     }
     if (autoscalingPolicy.customMetricUtilizations) {
       autoscalerMap.autoscaling_policy.metric = []
@@ -421,6 +429,12 @@
         } else {
           return
         }
+        if (metric.filter) {
+          metricMap.filter = metric.filter
+        }
+        if (metric.singleInstanceAssignment) {
+          metricMap.single_instance_assignment = metric.singleInstanceAssignment
+        }
         //TODO(nwwebb) gce doesn't match terraform types
         switch(metric.utilizationTargetType) {
           case "GAUGE":
@@ -438,6 +452,32 @@
         autoscalerMap.autoscaling_policy.metric.add(metricMap)
       }
     }
+
+    if (autoscalingPolicy.scalingSchedules) {
+      autoscalerMap.autoscaling_policy.scalingSchedules = []
+      autoscalingPolicy.scalingSchedules.each { Map scalingSchedule ->
+        def scalingScheduleMap = [:]
+        if (scalingSchedule.scalingSchedule) {
+          scalingScheduleMap.name = scalingSchedule.scalingSchedule
+        }
+        if (scalingSchedule.description) {
+          scalingScheduleMap.description = scalingSchedule.description
+        }
+        if (scalingSchedule.disabled) {
+          scalingScheduleMap.disabled = scalingSchedule.disabled
+        }
+        if (scalingSchedule.durationSec) {
+          scalingScheduleMap.duration_sec = scalingSchedule.durationSec
+        }
+        if (scalingSchedule.minRequiredReplicas) {
+          scalingScheduleMap.min_required_replicas = scalingSchedule.minRequiredReplicas
+        }
+        if (scalingSchedule.timeZone) {
+          scalingScheduleMap.time_zone = scalingSchedule.timeZone
+        }
+        autoscalerMap.autoscaling_policy.scalingSchedules.add(scalingScheduleMap)
+      }
+    }
     if (autoscalingPolicy.loadBalancingUtilization?.utilizationTarget) {
       autoscalerMap.autoscaling_policy.load_balancing_utilization = [:]
       autoscalerMap.autoscaling_policy.load_balancing_utilization.target = autoscalingPolicy.loadBalancingUtilization.utilizationTarget
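Worked example of the snapshot mapping above (note that `scheduleCron` is not copied into the snapshot by this hunk): an input schedule like the first map below would be emitted as the snake_case map shown in the comment:

    def input = [scalingSchedule: "weekday-scale-out", durationSec: 3600,
                 minRequiredReplicas: 5, timeZone: "America/New_York"]
    // -> [name: "weekday-scale-out", duration_sec: 3600,
    //     min_required_replicas: 5, time_zone: "America/New_York"]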
@@ -28,6 +28,7 @@ import com.netflix.spinnaker.clouddriver.google.security.GoogleCredentials
 import com.netflix.spinnaker.clouddriver.google.security.GoogleNamedAccountCredentials
 import com.netflix.spinnaker.credentials.CredentialsRepository
 import com.netflix.spinnaker.kork.artifacts.model.Artifact
+import org.springframework.scheduling.support.CronSequenceGenerator

/**
* Common validation routines for standard description attributes.
@@ -220,8 +221,8 @@ class StandardGceAttributeValidator {
     def result = true
     if (value < min || value > max) {
       errors.rejectValue(attribute,
-        "${context}.${attribute}.rangeViolation",
-        "${context}.${attribute} must be between ${min} and ${max}, inclusive.")
+          "${context}.${attribute}.rangeViolation",
+          "${context}.${attribute} must be between ${min} and ${max}, inclusive.")
       result = false
     }
     return result
@@ -231,8 +232,8 @@
     def result = true
     if (maxValue < minValue) {
       errors.rejectValue(maxAttribute,
-        "${context}.${maxAttribute}.lessThanMin",
-        "${context}.${maxAttribute} must not be less than ${context}.${minAttribute}.")
+          "${context}.${maxAttribute}.lessThanMin",
+          "${context}.${maxAttribute} must not be less than ${context}.${minAttribute}.")
       result = false
     }
     return result
@@ -378,17 +379,17 @@

     if (!persistentDiskCount) {
       errors.rejectValue("disks",
-        "${context}.disks.missingPersistentDisk",
-        "A persistent boot disk is required.")
+          "${context}.disks.missingPersistentDisk",
+          "A persistent boot disk is required.")
     }
   }

   // Persistent disks must be at least 10GB.
   specifiedDisks.findAll { it.persistent }.eachWithIndex { persistentDisk, index ->
     if (persistentDisk.sizeGb < 10) {
       errors.rejectValue("disks",
-        "${context}.disk${index}.sizeGb.invalidSize",
-        "Persistent disks must be at least 10GB.")
+          "${context}.disk${index}.sizeGb.invalidSize",
+          "Persistent disks must be at least 10GB.")
     }
   }

@@ -399,36 +400,36 @@
       if (disk.is(firstPersistentDisk)) {
         if (firstPersistentDisk.sourceImage) {
           errors.rejectValue("disks",
-            "${context}.disk${index}.sourceImage.unexpected",
-            "The boot disk must not specify source image, it must be specified at the top-level on the request as `image`.")
+              "${context}.disk${index}.sourceImage.unexpected",
+              "The boot disk must not specify source image, it must be specified at the top-level on the request as `image`.")
         }
       } else if (disk.persistent && !disk.sourceImage) {
         errors.rejectValue("disks",
-          "${context}.disk${index}.sourceImage.required",
-          "All non-boot persistent disks are required to specify source image.")
+            "${context}.disk${index}.sourceImage.required",
+            "All non-boot persistent disks are required to specify source image.")
       }
     }

     specifiedDisks.findAll { it.type == GoogleDiskType.LOCAL_SSD }.eachWithIndex { localSSDDisk, index ->
       // Shared-core instance types do not support local-ssd.
       if (!instanceTypeDisk.supportsLocalSSD) {
         errors.rejectValue("disks",
-          "${context}.disk${index}.type.localSSDUnsupported",
-          "Instance type $instanceTypeDisk.instanceType does not support Local SSD.")
+            "${context}.disk${index}.type.localSSDUnsupported",
+            "Instance type $instanceTypeDisk.instanceType does not support Local SSD.")
       }

       // local-ssd disks must be exactly 375GB.
       if (localSSDDisk.sizeGb != 375) {
         errors.rejectValue("disks",
-          "${context}.disk${index}.sizeGb.invalidSize",
-          "Local SSD disks must be exactly 375GB.")
+            "${context}.disk${index}.sizeGb.invalidSize",
+            "Local SSD disks must be exactly 375GB.")
       }

       // local-ssd disks must have auto-delete set.
       if (!localSSDDisk.autoDelete) {
         errors.rejectValue("disks",
-          "${context}.disk${index}.autoDelete.required",
-          "Local SSD disks must have auto-delete set.")
+            "${context}.disk${index}.autoDelete.required",
+            "Local SSD disks must have auto-delete set.")
       }
     }
   }
@@ -450,9 +451,21 @@

     if (minNumReplicas != null && maxNumReplicas != null) {
       validateMaxNotLessThanMin(minNumReplicas,
-        maxNumReplicas,
-        "autoscalingPolicy.minNumReplicas",
-        "autoscalingPolicy.maxNumReplicas")
+          maxNumReplicas,
+          "autoscalingPolicy.minNumReplicas",
+          "autoscalingPolicy.maxNumReplicas")
     }

+    if (cpuUtilization != null) {
+      cpuUtilization.with {
+        if (utilizationTarget != null) {
+          validateInRangeExclusive(utilizationTarget,
+            0, 1, "autoscalingPolicy.cpuUtilization.utilizationTarget")
+        }
+        if (predictiveMethod != null) {
+          validateNotEmpty(predictiveMethod, "autoscalingPolicy.cpuUtilization.predictiveMethod")
+        }
+      }
+    }
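Going by its name, `validateInRangeExclusive` treats both bounds as open, so a CPU target must satisfy 0 < target < 1. A hedged illustration of the intent, not commit code:

    // Inside the validator:
    //   validateInRangeExclusive(0.6, 0, 1, "autoscalingPolicy.cpuUtilization.utilizationTarget")  // passes
    //   validateInRangeExclusive(1.0, 0, 1, "autoscalingPolicy.cpuUtilization.utilizationTarget")  // rejected
    def target = 0.6
    assert 0 < target && target < 1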

customMetricUtilizations.eachWithIndex { utilization, index ->
@@ -463,18 +476,36 @@

       if (utilizationTarget <= 0) {
         errors.rejectValue("${context}.${path}.utilizationTarget",
-          "${context}.${path}.utilizationTarget must be greater than zero.")
+            "${context}.${path}.utilizationTarget must be greater than zero.")
       }

       validateNotEmpty(utilizationTargetType, "${path}.utilizationTargetType")

+      if (singleInstanceAssignment < 0) {
+        errors.rejectValue("${context}.${path}.singleInstanceAssignment",
+          "${context}.${path}.singleInstanceAssignment must be greater than zero.")
+      }
     }
   }

+    scalingSchedules.each { scalingSchedule ->
+      if(scalingSchedule != null) {
+        if (scalingSchedule.duration != null) {
+          validateInRangeExclusive(scalingSchedule.duration,
+            300, Integer.MAX_VALUE, "autoscalingPolicy.scalingSchedule.duration")
+        }
+        if (scalingSchedule.scheduleCron != null) {
+          validateCronExpression(scalingSchedule.scheduleCron, "autoscalingPolicy.scalingSchedule.scheduleCron")
+        }
+        if (scalingSchedule.timezone != null) {
+          validateTimeZone(scalingSchedule.timezone, "autoscalingPolicy.scalingSchedule.timezone")
+        }
+      }
+    }
   }
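A hedged example of a schedule fragment that would pass all three checks above. Note the validator reads `duration`, `scheduleCron`, and `timezone` here, while the snapshot mapping earlier in this diff reads `durationSec` and `timeZone`; the names below follow the validator:

    def schedule = [
      duration    : 3600,               // strictly greater than 300 per the exclusive range check
      scheduleCron: "0 7 * * MON-FRI",  // validated after a seconds field is prepended
      timezone    : "America/Los_Angeles"
    ]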

[ "cpuUtilization", "loadBalancingUtilization" ].each {
if (policy[it] != null && policy[it].utilizationTarget != null) {
validateInRangeExclusive(policy[it].utilizationTarget,
0, 1, "autoscalingPolicy.${it}.utilizationTarget")
if (loadBalancingUtilization != null && loadBalancingUtilization.utilizationTarget) {
validateInRangeExclusive(loadBalancingUtilization.utilizationTarget,
0, 1, "autoscalingPolicy.loadBalancingUtilization.utilizationTarget")
}
}
}
@@ -494,10 +525,10 @@
       validateNonNegativeLong(fixed as int, "autoHealingPolicy.maxUnavailable.fixed")
     } else if (percent != null) {
       validateInRangeInclusive(percent as int,
-        0, 100, "autoHealingPolicy.maxUnavailable.percent")
+          0, 100, "autoHealingPolicy.maxUnavailable.percent")
     } else if (rejectEmptyMaxUnavailable) {
       this.errors.rejectValue("autoHealingPolicy.maxUnavailable",
-        "${this.context}.autoHealingPolicy.maxUnavailable.neitherFixedNorPercent")
+          "${this.context}.autoHealingPolicy.maxUnavailable.neitherFixedNorPercent")
     }
   }
 }
@@ -531,4 +562,23 @@
   def validateAuthScopes(List<String> authScopes) {
     return validateOptionalNameList(authScopes, "authScope")
   }
+
+  def validateCronExpression(String expression, String attribute) {
+    def result = CronSequenceGenerator.isValidExpression("* " + expression)
+    if(!result){
+      errors.rejectValue attribute, "${context}.${attribute} must be a valid CRON expression."
+    }
+    return result
+  }
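The `"* " + expression` prefix exists because Spring's `CronSequenceGenerator` validates six-field expressions (seconds first), while GCE scaling schedules use five-field unix cron; prepending a wildcard seconds field bridges the two formats. For example:

    // A 5-field GCE cron becomes a valid 6-field Spring expression:
    assert CronSequenceGenerator.isValidExpression("* " + "0 7 * * MON-FRI")
    assert !CronSequenceGenerator.isValidExpression("* " + "not a cron")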

+  def validateTimeZone(String timeZone, String attribute) {
+    def result = true
+    try {
+      result = Set.of(TimeZone.getAvailableIDs()).contains(timeZone)
+    }catch (Exception e){
+      errors.rejectValue attribute, "${context}.${attribute} must be a time zone name from the tz database."
+      result = false
+    }
+    return result
+  }
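Usage sketch: the IDs come from the JDK's copy of the tz database, so region/city names validate and arbitrary strings do not:

    assert Set.of(TimeZone.getAvailableIDs()).contains("America/New_York")
    assert !Set.of(TimeZone.getAvailableIDs()).contains("Not/A_Zone")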
}