Skip to content

Commit

Permalink
fix(aws): *do* filter scaling processes to resume/suspend (#3925)
Browse files Browse the repository at this point in the history
This reverts commit b146bf6 (#3910)

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
  • Loading branch information
dreynaud and mergify[bot] committed Sep 24, 2020
1 parent bb05094 commit 6c24065
Show file tree
Hide file tree
Showing 4 changed files with 90 additions and 48 deletions.
Expand Up @@ -39,6 +39,13 @@ abstract class AbstractAwsScalingProcessTask extends AbstractCloudProviderAwareT

abstract String getType()

/**
* @param targetReference Current ASG reference
* @param processes Requested scaling processes to suspend/resume
* @return Scaling processes that need modification based on current state of ASG (may be empty if scaling processes already exist)
*/
abstract List<String> filterProcesses(TargetServerGroup targetServerGroup, List<String> processes)

@Nonnull
@Override
TaskResult execute(@Nonnull StageExecution stage) {
Expand All @@ -59,7 +66,10 @@ abstract class AbstractAwsScalingProcessTask extends AbstractCloudProviderAwareT
*
* Otherwise, use any scaling processes that were modified by a previous stage (context.scalingProcesses.ASG_NAME)
*/
def processes = (stage.context.processes ?: stage.context["scalingProcesses.${asgName}" as String] ?: []) as List<String>
def processes = filterProcesses(
targetServerGroup,
(stage.context.processes ?: stage.context["scalingProcesses.${asgName}" as String]) as List<String>
)
def stageContext = new HashMap(stage.context) + [
processes: processes,
asgName : asgName
Expand Down
Expand Up @@ -15,9 +15,25 @@
*/
package com.netflix.spinnaker.orca.clouddriver.tasks.providers.aws.scalingprocess

import com.netflix.spinnaker.orca.clouddriver.pipeline.servergroup.support.TargetServerGroup
import org.springframework.stereotype.Component

@Component
class ResumeAwsScalingProcessTask extends AbstractAwsScalingProcessTask {
  String type = "resumeAsgProcessesDescription"

  /**
   * Filters the requested scaling processes down to those that actually need a
   * resume operation: a process only needs resuming if it is currently listed in
   * the ASG's suspendedProcesses.
   *
   * @param targetServerGroup current ASG reference resolved by clouddriver
   * @param processes requested scaling processes to resume (may be null/empty)
   * @return the subset of {@code processes} that are currently suspended; empty
   *         when nothing was requested or nothing is suspended
   */
  @Override
  List<String> filterProcesses(TargetServerGroup targetServerGroup, List<String> processes) {
    // Nothing requested -> nothing to resume.
    if (!processes) {
      return []
    }

    // NOTE(review): `asg` is assumed to be a Map-shaped view of the ASG from
    // clouddriver; safe navigation guards against a missing/null `asg` payload,
    // which the original would NPE on.
    def targetAsgConfiguration = targetServerGroup.asg as Map<String, Object>
    def suspendedProcesses = targetAsgConfiguration?.suspendedProcesses
    if (suspendedProcesses) {
      List<String> suspendedProcessNames = suspendedProcesses*.processName as List<String>
      // intersect() always returns a (possibly empty) list, never null, so no
      // elvis fallback is needed.
      return suspendedProcessNames.intersect(processes)
    }

    // No processes are suspended on the ASG -> nothing to resume.
    return []
  }
}
Expand Up @@ -15,9 +15,25 @@
*/
package com.netflix.spinnaker.orca.clouddriver.tasks.providers.aws.scalingprocess

import com.netflix.spinnaker.orca.clouddriver.pipeline.servergroup.support.TargetServerGroup
import org.springframework.stereotype.Component

@Component
class SuspendAwsScalingProcessTask extends AbstractAwsScalingProcessTask {
  String type = "suspendAsgProcessesDescription"

  /**
   * Filters the requested scaling processes down to those that still need a
   * suspend operation, i.e. the requested processes minus any that the ASG
   * already reports as suspended.
   *
   * @param targetServerGroup current ASG reference resolved by clouddriver
   * @param processes requested scaling processes to suspend (may be null/empty)
   * @return the requested processes not yet suspended; empty when nothing was requested
   */
  @Override
  List<String> filterProcesses(TargetServerGroup targetServerGroup, List<String> processes) {
    // Nothing requested -> nothing to suspend.
    if (!processes) {
      return []
    }

    def asgDetails = targetServerGroup.asg as Map<String, Object>
    def alreadySuspended = asgDetails.suspendedProcesses
    if (!alreadySuspended) {
      // Nothing currently suspended: everything requested still needs suspending.
      return processes
    }

    List<String> suspendedNames = alreadySuspended*.processName as List<String>
    return processes - suspendedNames
  }
}
Expand Up @@ -33,52 +33,52 @@ class AbstractAwsScalingProcessTaskSpec extends Specification {
}

@Unroll
def "should resume/suspend scaling processes regardless of the target state"() {
def "should only resume/suspend scaling processes that are not already in the target state"() {
given:
def stage = new StageExecutionImpl(PipelineExecutionImpl.newPipeline("orca"), null, context)
def targetServerGroupResolver = Mock(TargetServerGroupResolver) {
1 * resolve(_) >> {
return [targetServerGroup]
def targetServerGroupResolver = Mock(TargetServerGroupResolver) {
1 * resolve(_) >> {
return targetServerGroups
}
}
}

def task = isResume ?
new ResumeAwsScalingProcessTask(resolver: targetServerGroupResolver, katoService: katoService) :
new SuspendAwsScalingProcessTask(resolver: targetServerGroupResolver, katoService: katoService)
def task = isResume ?
new ResumeAwsScalingProcessTask(resolver: targetServerGroupResolver, katoService: katoService) :
new SuspendAwsScalingProcessTask(resolver: targetServerGroupResolver, katoService: katoService)

when:
def result = task.execute(stage)
def result = task.execute(stage)
def outputs = result.context
def globalOutputs = result.outputs

then:
outputs.processes == expectedScalingProcesses
outputs.containsKey("kato.last.task.id") == !expectedScalingProcesses.isEmpty()
globalOutputs["scalingProcesses.${context.asgName}" as String] == expectedScalingProcesses
outputs.processes == expectedScalingProcesses
outputs.containsKey("kato.last.task.id") == !expectedScalingProcesses.isEmpty()
globalOutputs["scalingProcesses.${context.asgName}" as String] == expectedScalingProcesses

where:
isResume | context | targetServerGroup || expectedScalingProcesses
true | stageData(["Launch"]) | asg(["Launch"]) || ["Launch"]
true | stageData([], ["Launch"]) | asg(["Launch"]) || ["Launch"]
true | stageData(["Launch"]) | asg([]) || ["Launch"]
true | stageData(["Launch"]) | asg([]) || ["Launch"]
false | stageData(["Launch"]) | asg([]) || ["Launch"]
false | stageData([], ["Launch"]) | asg([]) || ["Launch"]
false | stageData(["Launch"]) | asg(["Launch"]) || ["Launch"]
isResume | context | targetServerGroups || expectedScalingProcesses
true | sD("targetAsg", ["Launch"]) | [tSG("targetAsg", ["Launch"])] || ["Launch"]
true | sD("targetAsg", [], ["Launch"]) | [tSG("targetAsg", ["Launch"])] || ["Launch"]
true | sD("targetAsg", ["Launch"]) | [tSG("targetAsg", [])] || []
true | sD("targetAsg", ["Launch"]) | [tSG("targetAsg", [])] || []
false | sD("targetAsg", ["Launch"]) | [tSG("targetAsg", [])] || ["Launch"]
false | sD("targetAsg", [], ["Launch"]) | [tSG("targetAsg", [])] || ["Launch"]
false | sD("targetAsg", ["Launch"]) | [tSG("targetAsg", ["Launch"])] || []
}

private Map<String, Object> stageData(List<String> processes,
private Map<String, Object> sD(String asgName,
List<String> processes,
List<String> globalProcesses = [],
String region = "us-west-1",
String asgName = "targetAsg") {
String region = "us-west-1") {
return [
asgName: asgName, processes: processes, regions: [region], ("scalingProcesses.${asgName}" as String): globalProcesses
]
}

private TargetServerGroup asg(List<String> suspendedProcesses, String region = "us-west-1", String asgName = "targetAsg") {
private TargetServerGroup tSG(String name, List<String> suspendedProcesses, String region = "us-west-1") {
return new TargetServerGroup(
name: asgName,
name: name,
region: region,
asg : [
suspendedProcesses: suspendedProcesses.collect {
Expand All @@ -90,42 +90,42 @@ class AbstractAwsScalingProcessTaskSpec extends Specification {

def "should get target reference dynamically when stage is dynamic"() {
given:
def tsg = asg(["Launch"])
def resolver = GroovySpy(TargetServerGroupResolver, global: true)
GroovySpy(TargetServerGroup, global: true, constructorArgs: [tsg])
def tsg = tSG("targetAsg", ["Launch"])
def resolver = GroovySpy(TargetServerGroupResolver, global: true)
GroovySpy(TargetServerGroup, global: true, constructorArgs: [tsg])

def stage = new StageExecutionImpl(PipelineExecutionImpl.newPipeline("orca"), null, stageData(["Launch"]))
def task = new ResumeAwsScalingProcessTask(resolver: resolver, katoService: katoService)
def stage = new StageExecutionImpl(PipelineExecutionImpl.newPipeline("orca"), null, sD("targetAsg", ["Launch"]))
def task = new ResumeAwsScalingProcessTask(resolver: resolver, katoService: katoService)

when:
task.execute(stage)
task.execute(stage)

then:
TargetServerGroup.isDynamicallyBound(stage) >> true
TargetServerGroupResolver.fromPreviousStage(stage) >> tsg
TargetServerGroup.isDynamicallyBound(stage) >> true
TargetServerGroupResolver.fromPreviousStage(stage) >> tsg
}

def "should send asg name to kato when dynamic references configured"() {
given:
def tsg = asg(["Launch"])
GroovySpy(TargetServerGroup, global: true, constructorArgs: [tsg])
def resolver = GroovySpy(TargetServerGroupResolver, global: true)
KatoService katoService = Mock(KatoService)
def tsg = tSG("targetAsg", ["Launch"])
GroovySpy(TargetServerGroup, global: true, constructorArgs: [tsg])
def resolver = GroovySpy(TargetServerGroupResolver, global: true)
KatoService katoService = Mock(KatoService)

def ctx = stageData(["Launch"])
ctx.cloudProvider = "abc"
def ctx = sD("targetAsg", ["Launch"])
ctx.cloudProvider = "abc"
def stage = new StageExecutionImpl(PipelineExecutionImpl.newPipeline("orca"), null, ctx)
def task = new ResumeAwsScalingProcessTask(resolver: resolver, katoService: katoService)
def task = new ResumeAwsScalingProcessTask(resolver: resolver, katoService: katoService)

when:
task.execute(stage)
task.execute(stage)

then:
TargetServerGroup.isDynamicallyBound(stage) >> true
TargetServerGroupResolver.fromPreviousStage(stage) >> tsg
katoService.requestOperations("abc", { Map m -> m.resumeAsgProcessesDescription.asgName == "targetAsg" }) >> {
return new TaskId(id: "1")
}
0 * katoService.requestOperations(_, _)
TargetServerGroup.isDynamicallyBound(stage) >> true
TargetServerGroupResolver.fromPreviousStage(stage) >> tsg
katoService.requestOperations("abc", { Map m -> m.resumeAsgProcessesDescription.asgName == "targetAsg" }) >> {
return new TaskId(id: "1")
}
0 * katoService.requestOperations(_, _)
}
}

0 comments on commit 6c24065

Please sign in to comment.