fix(redblack): fix red/black deploy (#3182)
The main issue resolved here is how we determine whether a source ASG is present during a red/black (R/B) deploy.
In some cases the `.asgName` property is used, while in others `.serverGroupName` is used. If we don't check both, some operations in the UI (tasks) don't work correctly:
e.g. a clone with red/black configured to leave a max of 2 remaining ASGs will fail to disable the old ASG and will also skip the shrink step. The fallback is sketched below.
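In essence, the source lookup now treats either property as evidence of a source server group. A minimal Groovy sketch of the fallback, assuming the `StageData.Source` shape used by these strategies (the helper name is illustrative, not part of this change):

```groovy
// Sketch only: resolve the source server group name from either property.
static String resolveSourceName(StageData.Source source) {
  if (source == null) {
    return null // no source server group at all
  }
  // Some code paths populate serverGroupName while others only set asgName;
  // checking both keeps the disable and shrink stages from being silently skipped.
  return source.serverGroupName ?: source.asgName
}
```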

While here, I am also making a similar change to the rolling red/black (RRB) and monitored deploy (MD) strategies.

Additionally, this cleans up the `composeFlow` -> `composeBeforeStages`/`composeAfterStages` split for strategies, as a follow-up to PR #3133; the resulting shape is sketched below.
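Roughly, each strategy now plans its own before/after stages instead of building a single `composeFlow` list and filtering it by `SyntheticStageOwner`. An illustrative strategy (not a verbatim copy of any class in this commit):

```groovy
// Illustrative sketch of a strategy after the composeFlow cleanup.
class ExampleStrategy implements Strategy {
  @Override
  List<Stage> composeBeforeStages(Stage stage) {
    // Adjust the deploy context up front (e.g. zero out the requested capacity);
    // this example plans no synthetic before-stages.
    return Collections.emptyList()
  }

  @Override
  List<Stage> composeAfterStages(Stage stage) {
    // Plan the post-deploy work (disable/shrink/scale-down of the old ASG) here.
    List<Stage> stages = new ArrayList<>()
    return stages
  }
}
```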
marchello2000 committed Sep 20, 2019
1 parent 752e442 commit aa5a675
Showing 6 changed files with 81 additions and 140 deletions.
@@ -111,7 +111,7 @@ abstract class AbstractDeployStrategyStage extends AbstractCloudProviderAwareSta
Strategy strategy = getStrategy(parent)
def preProcessors = deployStagePreProcessors.findAll { it.supports(parent) }
def stageData = parent.mapTo(StageData)
def stages = []
List<Stage> stages = new ArrayList<>()
stages.addAll(strategy.composeBeforeStages(parent))

preProcessors.each {
@@ -22,6 +22,7 @@ import com.netflix.spinnaker.orca.clouddriver.pipeline.cluster.ScaleDownClusterS
import com.netflix.spinnaker.orca.clouddriver.pipeline.monitoreddeploy.NotifyDeployCompletedStage
import com.netflix.spinnaker.orca.clouddriver.pipeline.monitoreddeploy.NotifyDeployStartingStage
import com.netflix.spinnaker.orca.clouddriver.pipeline.monitoreddeploy.EvaluateDeploymentHealthStage
import com.netflix.spinnaker.orca.clouddriver.pipeline.servergroup.CloneServerGroupStage
import com.netflix.spinnaker.orca.clouddriver.pipeline.servergroup.CreateServerGroupStage
import com.netflix.spinnaker.orca.clouddriver.pipeline.servergroup.DisableServerGroupStage
import com.netflix.spinnaker.orca.clouddriver.pipeline.servergroup.ResizeServerGroupStage
@@ -74,18 +75,28 @@ class MonitoredDeployStrategy implements Strategy {
DeploymentMonitorServiceProvider deploymentMonitorServiceProvider

@Override
List<Stage> composeBeforeStages(Stage parent) {
return composeFlow(parent)
.findAll({ it.syntheticStageOwner == SyntheticStageOwner.STAGE_BEFORE })
}
List<Stage> composeBeforeStages(Stage stage) {
if (stage.context.useSourceCapacity) {
stage.context.useSourceCapacity = false
}

@Override
List<Stage> composeAfterStages(Stage parent) {
return composeFlow(parent)
.findAll({ it.syntheticStageOwner == SyntheticStageOwner.STAGE_AFTER })
// we expect a capacity object if a fixed capacity has been requested or as a fallback value when we are copying
// the capacity from the current server group
def savedCapacity = stage.context.savedCapacity ?: stage.context.capacity?.clone()
stage.context.savedCapacity = savedCapacity

// FIXME: this clobbers the input capacity value (if any). Should find a better way to request a new asg of size 0
stage.context.capacity = [
min : 0,
max : 0,
desired: 0
]

return Collections.emptyList()
}

List<Stage> composeFlow(Stage stage) {
@Override
List<Stage> composeAfterStages(Stage stage) {
def stages = []
def stageData = stage.mapTo(MonitoredDeployStageData)
def cleanupConfig = AbstractDeployStrategyStage.CleanupConfig.fromStage(stage)
@@ -98,21 +109,9 @@ class MonitoredDeployStrategy implements Strategy {
cloudProvider : cleanupConfig.cloudProvider,
]

if (stage.context.useSourceCapacity) {
stage.context.useSourceCapacity = false
}

// we expect a capacity object if a fixed capacity has been requested or as a fallback value when we are copying
// the capacity from the current server group
def savedCapacity = stage.context.savedCapacity ?: stage.context.capacity?.clone()
stage.context.savedCapacity = savedCapacity

// FIXME: this clobbers the input capacity value (if any). Should find a better way to request a new asg of size 0
stage.context.capacity = [
min : 0,
max : 0,
desired: 0
]
def savedCapacity = stage.context.savedCapacity

def deploySteps = stageData.getDeploySteps()
if (deploySteps.isEmpty() || deploySteps[-1] != 100) {
@@ -132,23 +131,16 @@
try {
StageData.Source sourceServerGroup

Stage parentCreateServerGroupStage = stage.directAncestors().find() { it.type == CreateServerGroupStage.PIPELINE_CONFIG_TYPE }

if (parentCreateServerGroupStage.status == ExecutionStatus.NOT_STARTED) {
// No point in composing the flow if we are called to plan "beforeStages" since we don't have any STAGE_BEFOREs.
// Also, we rely on the the source server group task to have run already.
// In the near future we will move composeFlow into beforeStages and afterStages instead of the
// deprecated aroundStages
return []
}
Stage parentCreateServerGroupStage = stage.directAncestors()
.find() { it.type == CreateServerGroupStage.PIPELINE_CONFIG_TYPE || it.type == CloneServerGroupStage.PIPELINE_CONFIG_TYPE }

StageData parentStageData = parentCreateServerGroupStage.mapTo(StageData)
sourceServerGroup = parentStageData.source

if (sourceServerGroup != null && sourceServerGroup.serverGroupName != null) {
if (sourceServerGroup != null && (sourceServerGroup.serverGroupName != null || sourceServerGroup.asgName != null)) {
source = new ResizeStrategy.Source(
region: sourceServerGroup.region,
serverGroupName: sourceServerGroup.serverGroupName,
serverGroupName: sourceServerGroup.serverGroupName ?: sourceServerGroup.asgName,
credentials: stageData.credentials ?: stageData.account,
cloudProvider: stageData.cloudProvider
)
@@ -53,38 +53,25 @@ class RedBlackStrategy implements Strategy, ApplicationContextAware {

@Override
List<Stage> composeBeforeStages(Stage parent) {
return composeFlow(parent)
.findAll({ it.syntheticStageOwner == SyntheticStageOwner.STAGE_BEFORE })
return Collections.emptyList()
}

@Override
List<Stage> composeAfterStages(Stage parent) {
return composeFlow(parent)
.findAll({ it.syntheticStageOwner == SyntheticStageOwner.STAGE_AFTER })
}

List<Stage> composeFlow(Stage stage) {
List<Stage> stages = []
List<Stage> composeAfterStages(Stage stage) {
List<Stage> stages = new ArrayList<>()
StageData stageData = stage.mapTo(StageData)
Map<String, Object> baseContext = AbstractDeployStrategyStage.CleanupConfig.toContext(stageData)

Stage parentCreateServerGroupStage = stage.directAncestors().find() { it.type == CreateServerGroupStage.PIPELINE_CONFIG_TYPE || it.type == CloneServerGroupStage.PIPELINE_CONFIG_TYPE }
if (parentCreateServerGroupStage == null) {
throw new IllegalStateException("Failed to determine source server group from parent stage while planning red/black flow")
}
if (parentCreateServerGroupStage.status == ExecutionStatus.NOT_STARTED) {
// No point in composing the flow if we are called to plan "beforeStages" since we don't have any STAGE_BEFOREs.
// Also, we rely on the the source server group task to have run already.
// In the near future we will move composeFlow into beforeStages and afterStages instead of the
// deprecated aroundStages
return []
}

StageData parentStageData = parentCreateServerGroupStage.mapTo(StageData)
StageData.Source sourceServerGroup = parentStageData.source

// Short-circuit if there is no source server group
if (sourceServerGroup == null || sourceServerGroup.serverGroupName == null) {
if (sourceServerGroup == null || (sourceServerGroup.serverGroupName == null && sourceServerGroup.asgName == null)) {
return []
}

@@ -15,8 +15,8 @@
*/
package com.netflix.spinnaker.orca.clouddriver.pipeline.servergroup.strategies

import com.netflix.spinnaker.orca.ExecutionStatus
import com.netflix.spinnaker.orca.clouddriver.pipeline.cluster.ScaleDownClusterStage
import com.netflix.spinnaker.orca.clouddriver.pipeline.servergroup.CloneServerGroupStage
import com.netflix.spinnaker.orca.clouddriver.pipeline.servergroup.CreateServerGroupStage
import com.netflix.spinnaker.orca.clouddriver.pipeline.servergroup.DisableServerGroupStage
import com.netflix.spinnaker.orca.clouddriver.pipeline.servergroup.ResizeServerGroupStage
@@ -60,42 +60,20 @@ class RollingRedBlackStrategy implements Strategy, ApplicationContextAware {
ScaleDownClusterStage scaleDownClusterStage

@Override
List<Stage> composeBeforeStages(Stage parent) {
return composeFlow(parent)
.findAll({ it.syntheticStageOwner == SyntheticStageOwner.STAGE_BEFORE })
}

@Override
List<Stage> composeAfterStages(Stage parent) {
return composeFlow(parent)
.findAll({ it.syntheticStageOwner == SyntheticStageOwner.STAGE_AFTER })
}

List<Stage> composeFlow(Stage stage) {
List<Stage> composeBeforeStages(Stage stage) {
if (!pipelineStage) {
throw new IllegalStateException("Rolling red/black cannot be run without front50 enabled. Please set 'front50.enabled: true' in your orca config.")
}

def stages = []
def stageData = stage.mapTo(RollingRedBlackStageData)
def cleanupConfig = AbstractDeployStrategyStage.CleanupConfig.fromStage(stage)

Map baseContext = [
(cleanupConfig.location.singularType()): cleanupConfig.location.value,
cluster : cleanupConfig.cluster,
moniker : cleanupConfig.moniker,
credentials : cleanupConfig.account,
cloudProvider : cleanupConfig.cloudProvider,
]
if (stage.context.useSourceCapacity) {
stage.context.useSourceCapacity = false
}

if (stageData.targetSize) {
stage.context.targetSize = 0
}

if (stage.context.useSourceCapacity) {
stage.context.useSourceCapacity = false
}

// we expect a capacity object if a fixed capacity has been requested or as a fallback value when we are copying
// the capacity from the current server group
def savedCapacity = stage.context.savedCapacity ?: stage.context.capacity?.clone()
@@ -108,6 +86,27 @@ class RollingRedBlackStrategy implements Strategy, ApplicationContextAware {
desired: 0
]

return Collections.emptyList()
}

@Override
List<Stage> composeAfterStages(Stage stage) {
def stages = []
def stageData = stage.mapTo(RollingRedBlackStageData)
def cleanupConfig = AbstractDeployStrategyStage.CleanupConfig.fromStage(stage)

Map baseContext = [
(cleanupConfig.location.singularType()): cleanupConfig.location.value,
cluster : cleanupConfig.cluster,
moniker : cleanupConfig.moniker,
credentials : cleanupConfig.account,
cloudProvider : cleanupConfig.cloudProvider,
]

// we expect a capacity object if a fixed capacity has been requested or as a fallback value when we are copying
// the capacity from the current server group
def savedCapacity = stage.context.savedCapacity

def targetPercentages = stageData.getTargetPercentages()
if (targetPercentages.isEmpty() || targetPercentages[-1] != 100) {
targetPercentages.add(100)
@@ -124,23 +123,16 @@
try {
StageData.Source sourceServerGroup

Stage parentCreateServerGroupStage = stage.directAncestors().find() { it.type == CreateServerGroupStage.PIPELINE_CONFIG_TYPE }

if (parentCreateServerGroupStage.status == ExecutionStatus.NOT_STARTED) {
// No point in composing the flow if we are called to plan "beforeStages" since we don't have any STAGE_BEFOREs.
// Also, we rely on the the source server group task to have run already.
// In the near future we will move composeFlow into beforeStages and afterStages instead of the
// deprecated aroundStages
return []
}
Stage parentCreateServerGroupStage = stage.directAncestors()
.find() { it.type == CreateServerGroupStage.PIPELINE_CONFIG_TYPE || it.type == CloneServerGroupStage.PIPELINE_CONFIG_TYPE }

StageData parentStageData = parentCreateServerGroupStage.mapTo(StageData)
sourceServerGroup = parentStageData.source

if (sourceServerGroup != null && sourceServerGroup.serverGroupName != null) {
if (sourceServerGroup != null && (sourceServerGroup.serverGroupName != null || sourceServerGroup.asgName != null)) {
source = new ResizeStrategy.Source(
region: sourceServerGroup.region,
serverGroupName: sourceServerGroup.serverGroupName,
serverGroupName: sourceServerGroup.serverGroupName ?: sourceServerGroup.asgName,
credentials: stageData.credentials ?: stageData.account,
cloudProvider: stageData.cloudProvider
)
@@ -17,26 +17,19 @@
package com.netflix.spinnaker.orca.clouddriver.pipeline.servergroup.strategies

import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService
import com.netflix.spinnaker.kork.dynamicconfig.SpringDynamicConfigService
import com.netflix.spinnaker.moniker.Moniker
import com.netflix.spinnaker.orca.ExecutionStatus
import com.netflix.spinnaker.orca.clouddriver.pipeline.cluster.DisableClusterStage
import com.netflix.spinnaker.orca.clouddriver.pipeline.cluster.ScaleDownClusterStage
import com.netflix.spinnaker.orca.clouddriver.pipeline.cluster.ShrinkClusterStage
import com.netflix.spinnaker.orca.clouddriver.utils.TrafficGuard
import com.netflix.spinnaker.orca.pipeline.WaitStage
import com.netflix.spinnaker.orca.pipeline.model.SyntheticStageOwner
import org.springframework.mock.env.MockEnvironment
import spock.lang.Specification

import static com.netflix.spinnaker.orca.test.model.ExecutionBuilder.pipeline
import static com.netflix.spinnaker.orca.test.model.ExecutionBuilder.stage

class RedBlackStrategySpec extends Specification {

def trafficGuard = Stub(TrafficGuard)
def env = new MockEnvironment()

def dynamicConfigService = Mock(DynamicConfigService)

def disableClusterStage = new DisableClusterStage(dynamicConfigService)
@@ -85,27 +78,23 @@ class RedBlackStrategySpec extends Specification {
def stage = pipeline.stageByRef("2-deploy1")

when: 'planning without having run createServerGroup'
def syntheticStages = strat.composeFlow(stage)
def beforeStages = strat.composeBeforeStages(stage)
def afterStages = strat.composeAfterStages(stage)

then:
syntheticStages.size() == 0
beforeStages.size() == 0
afterStages.size() == 0

when: 'deploying into a new cluster (no source server group)'
pipeline.stageByRef("1-create").status = ExecutionStatus.RUNNING

then:
syntheticStages.size() == 0

when: 'deploying into an existing cluster'
pipeline.stageByRef("1-create").context.source = [
account: "testAccount",
region: "north",
serverGroupName: "unit-tests-v000",
asgName: "unit-tests-v000"
]
syntheticStages = strat.composeFlow(stage)
def beforeStages = syntheticStages.findAll { it.syntheticStageOwner == SyntheticStageOwner.STAGE_BEFORE }
def afterStages = syntheticStages.findAll { it.syntheticStageOwner == SyntheticStageOwner.STAGE_AFTER }
beforeStages = strat.composeBeforeStages(stage)
afterStages = strat.composeAfterStages(stage)

then:
beforeStages.isEmpty()
@@ -123,9 +112,8 @@

when: 'maxRemainingAsgs is specified'
pipeline.stageByRef("2-deploy1").context.maxRemainingAsgs = 10
syntheticStages = strat.composeFlow(stage)
beforeStages = syntheticStages.findAll { it.syntheticStageOwner == SyntheticStageOwner.STAGE_BEFORE }
afterStages = syntheticStages.findAll { it.syntheticStageOwner == SyntheticStageOwner.STAGE_AFTER }
beforeStages = strat.composeBeforeStages(stage)
afterStages = strat.composeAfterStages(stage)

then:
beforeStages.isEmpty()
@@ -136,9 +124,8 @@

when: 'scaleDown is true'
pipeline.stageByRef("2-deploy1").context.scaleDown = true
syntheticStages = strat.composeFlow(stage)
beforeStages = syntheticStages.findAll { it.syntheticStageOwner == SyntheticStageOwner.STAGE_BEFORE }
afterStages = syntheticStages.findAll { it.syntheticStageOwner == SyntheticStageOwner.STAGE_AFTER }
beforeStages = strat.composeBeforeStages(stage)
afterStages = strat.composeAfterStages(stage)

then:
beforeStages.isEmpty()
@@ -150,9 +137,8 @@

when: 'interestingHealthProviderNames is specified'
pipeline.stageByRef("2-deploy1").context.interestingHealthProviderNames = ["Google"]
syntheticStages = strat.composeFlow(stage)
beforeStages = syntheticStages.findAll { it.syntheticStageOwner == SyntheticStageOwner.STAGE_BEFORE }
afterStages = syntheticStages.findAll { it.syntheticStageOwner == SyntheticStageOwner.STAGE_AFTER }
beforeStages = strat.composeBeforeStages(stage)
afterStages = strat.composeAfterStages(stage)

then:
beforeStages.isEmpty()
@@ -163,9 +149,8 @@
when: 'delayBeforeDisableSec and delayBeforeScaleDownSec are specified'
pipeline.stageByRef("2-deploy1").context.delayBeforeDisableSec = 5
pipeline.stageByRef("2-deploy1").context.delayBeforeScaleDownSec = 10
syntheticStages = strat.composeFlow(stage)
beforeStages = syntheticStages.findAll { it.syntheticStageOwner == SyntheticStageOwner.STAGE_BEFORE }
afterStages = syntheticStages.findAll { it.syntheticStageOwner == SyntheticStageOwner.STAGE_AFTER }
beforeStages = strat.composeBeforeStages(stage)
afterStages = strat.composeAfterStages(stage)

then:
beforeStages.isEmpty()