fix(aws/cache): Reverts authoritative cluster cache change (#4669)
This has turned out to have a bunch of sneaky side-effects because of the
way the cluster relationships are built up, and it didn't end up fixing the
issue I had hoped it would (deleting the last server group in a region),
so for now I'm going to back it out and revisit it in the future.

* Revert "fix(aws/cache): include informative results in CatsClusterCachingAgent (#4668)"

This reverts commit b562da8.

* Revert "fix(aws/cache): index authoritative clusters by application (#4632)"

This reverts commit 64a9b46.

* Revert "fix(cache): adds a proper authoritative clusters source for aws/titus (#4615)"

This reverts commit 467ad58.
cfieber committed Jun 11, 2020
1 parent b562da8 commit b23812f
Showing 9 changed files with 35 additions and 364 deletions.
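At the core of the reverted change is whether the AWS ClusterCachingAgent publishes CLUSTERS as an AUTHORITATIVE or an INFORMATIVE data type (see the hunk below). An authoritative result is treated as the complete set of items the agent owns, so the cache evicts anything no longer reported, for example the cluster entry left behind when the last server group in a region is deleted; an informative result only merges data into existing items. The Kotlin sketch below illustrates that reconciliation rule with hypothetical names; it is not the CATS provider-cache implementation.

// Simplified illustration only -- hypothetical names, not the CATS ProviderCache API.
// An authoritative result is treated as the complete set of items the agent owns,
// so anything it no longer reports (e.g. the cluster of a deleted server group)
// is evicted. An informative result only merges data and never evicts.
fun reconcile(existingIds: Set<String>, reportedIds: Set<String>, authoritative: Boolean): Set<String> =
    if (authoritative) reportedIds             // ids missing from the report are evicted
    else existingIds union reportedIds         // informative writes add or enrich, never remove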
@@ -7,6 +7,8 @@ import com.netflix.spinnaker.cats.cache.DefaultCacheData
import com.netflix.spinnaker.cats.cache.WriteableCache
import com.netflix.spinnaker.cats.provider.ProviderCache
import com.netflix.spinnaker.cats.sql.cache.SqlCache
+ import com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.CLUSTERS
+ import com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.NAMED_IMAGES
import com.netflix.spinnaker.clouddriver.core.provider.agent.Namespace.ON_DEMAND
import kotlin.contracts.ExperimentalContracts
import org.slf4j.LoggerFactory
@@ -163,6 +165,28 @@ class SqlProviderCache(private val backingStore: WriteableCache) : ProviderCache
try {
MDC.put("agentClass", "$source putCacheResult")

+ // TODO every source type should have an authoritative agent and every agent should be authoritative for something
+ // TODO terrible hack because no AWS agent is authoritative for clusters, fix in ClusterCachingAgent
+ // TODO same with namedImages - fix in AWS ImageCachingAgent
+ if (
+ source.contains("clustercaching", ignoreCase = true) &&
+ !authoritativeTypes.contains(CLUSTERS.ns) &&
+ cacheResult.cacheResults
+ .any {
+ it.key.startsWith(CLUSTERS.ns)
+ }
+ ) {
+ authoritativeTypes.add(CLUSTERS.ns)
+ } else if (
+ source.contains("imagecaching", ignoreCase = true) &&
+ cacheResult.cacheResults
+ .any {
+ it.key.startsWith(NAMED_IMAGES.ns)
+ }
+ ) {
+ authoritativeTypes.add(NAMED_IMAGES.ns)
+ }

cacheResult.cacheResults
.filter {
it.key.contains(ON_DEMAND.ns, ignoreCase = true)
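The block restored above is the workaround this revert brings back into SqlProviderCache: any write whose source name contains "clustercaching" and whose results include cluster keys gets CLUSTERS force-added to its authoritative types (and likewise NAMED_IMAGES for image caching agents), per the restored TODO comments. Below is a minimal Kotlin sketch of that promotion rule in isolation; the function and parameter names are hypothetical, and "clusters" is assumed as the value of CLUSTERS.ns.

// Minimal sketch of the restored promotion rule, isolated from SqlProviderCache.
// Function and parameter names are hypothetical; "clusters" is the assumed value of CLUSTERS.ns.
fun promoteClustersIfNeeded(
    source: String,                        // caching agent name, e.g. one containing "clustercaching"
    authoritativeTypes: MutableSet<String>,
    resultTypes: Collection<String>        // keys of cacheResult.cacheResults
) {
    val clusters = "clusters"
    if (source.contains("clustercaching", ignoreCase = true) &&
        clusters !in authoritativeTypes &&
        resultTypes.any { it.startsWith(clusters) }
    ) {
        authoritativeTypes.add(clusters)   // cluster evictions are now computed from this write
    }
}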

This file was deleted.

This file was deleted.

@@ -68,9 +68,9 @@ class ClusterCachingAgent implements CachingAgent, OnDemandAgent, AccountAware,
private static final TypeReference<Map<String, Object>> ATTRIBUTES = new TypeReference<Map<String, Object>>() {}

static final Set<AgentDataType> types = Collections.unmodifiableSet([
+ AUTHORITATIVE.forType(CLUSTERS.ns),
AUTHORITATIVE.forType(SERVER_GROUPS.ns),
AUTHORITATIVE.forType(APPLICATIONS.ns),
- INFORMATIVE.forType(CLUSTERS.ns),
INFORMATIVE.forType(LOAD_BALANCERS.ns),
INFORMATIVE.forType(TARGET_GROUPS.ns),
INFORMATIVE.forType(LAUNCH_CONFIGS.ns),
@@ -23,7 +23,6 @@ import com.netflix.spinnaker.cats.agent.Agent
import com.netflix.spinnaker.cats.agent.AgentProvider
import com.netflix.spinnaker.clouddriver.aws.AmazonCloudProvider
import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonApplicationLoadBalancerCachingAgent
- import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonAuthoritativeClustersAgent
import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonCertificateCachingAgent
import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonCloudFormationCachingAgent
import com.netflix.spinnaker.clouddriver.aws.provider.agent.AmazonLaunchTemplateCachingAgent
@@ -127,12 +126,6 @@ class AwsProviderConfig {
//only index public images once per region
Set<String> publicRegions = []

- // If there is an agent scheduler, then this provider has been through the AgentController in the past.
- // We only need to add the AmazonAuthoritativeClustersAgent once.
- if (!awsProvider.agentScheduler) {
- newlyAddedAgents << new AmazonAuthoritativeClustersAgent();
- }

//sort the accounts in case of a reconfigure, we are more likely to re-index the public images in the same caching agent
//TODO(cfieber)-rework this is after rework of AWS Image/NamedImage keys
allAccounts.sort { it.name }.each { NetflixAmazonCredentials credentials ->
@@ -203,22 +203,18 @@ class AmazonClusterProvider implements ClusterProvider<AmazonCluster>, ServerGro
allImages
)

- Collection<AmazonCluster> clusters = cacheResults[CLUSTERS.ns].findResults { clusterData ->
+ Collection<AmazonCluster> clusters = cacheResults[CLUSTERS.ns].collect { clusterData ->
Map<String, String> clusterKey = Keys.parse(clusterData.id)

- if (clusterKey == null) {
- return null
- }

AmazonCluster cluster = new AmazonCluster()
cluster.accountName = clusterKey.account
cluster.name = clusterKey.cluster

- cluster.serverGroups = clusterData.relationships[SERVER_GROUPS.ns]?.findResults { serverGroups.get(it) } ?: []
- cluster.loadBalancers = clusterData.relationships[LOAD_BALANCERS.ns]?.findResults { loadBalancers.get(it) } ?: []
- cluster.targetGroups = clusterData.relationships[TARGET_GROUPS.ns]?.findResults { targetGroups.get(it) } ?: []
+ cluster.serverGroups = clusterData.relationships[SERVER_GROUPS.ns]?.findResults { serverGroups.get(it) }
+ cluster.loadBalancers = clusterData.relationships[LOAD_BALANCERS.ns]?.findResults { loadBalancers.get(it) }
+ cluster.targetGroups = clusterData.relationships[TARGET_GROUPS.ns]?.findResults { targetGroups.get(it) }

- return cluster
+ cluster
}

return clusters
@@ -251,20 +247,20 @@ class AmazonClusterProvider implements ClusterProvider<AmazonCluster>, ServerGro
AmazonCluster cluster = new AmazonCluster()
cluster.accountName = clusterKey.account
cluster.name = clusterKey.cluster
- cluster.serverGroups = clusterDataEntry.relationships[SERVER_GROUPS.ns]?.findResults { serverGroups.get(it) } ?: []
+ cluster.serverGroups = clusterDataEntry.relationships[SERVER_GROUPS.ns]?.findResults { serverGroups.get(it) }

if (includeDetails) {
- cluster.loadBalancers = clusterDataEntry.relationships[LOAD_BALANCERS.ns]?.findResults { loadBalancers.get(it) } ?: []
- cluster.targetGroups = clusterDataEntry.relationships[TARGET_GROUPS.ns]?.findResults { targetGroups.get(it) } ?: []
+ cluster.loadBalancers = clusterDataEntry.relationships[LOAD_BALANCERS.ns]?.findResults { loadBalancers.get(it) }
+ cluster.targetGroups = clusterDataEntry.relationships[TARGET_GROUPS.ns]?.findResults { targetGroups.get(it) }
} else {
cluster.loadBalancers = clusterDataEntry.relationships[LOAD_BALANCERS.ns]?.collect { loadBalancerKey ->
Map parts = Keys.parse(loadBalancerKey)
new AmazonLoadBalancer(name: parts.loadBalancer, account: parts.account, region: parts.region)
- } ?: []
+ }
cluster.targetGroups = clusterDataEntry.relationships[TARGET_GROUPS.ns]?.collect { targetGroupKey ->
Map parts = Keys.parse(targetGroupKey)
new AmazonTargetGroup(name: parts.loadBalancer, account: parts.account, region: parts.region)
- } ?: []
+ }
}
cluster
}
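The hunks above also undo the null-handling that went in with the reverted commits: the outer loop moves back from findResults, which skips clusters whose key fails to parse, to collect, and the ?: [] fallbacks are dropped, so a missing relationship leaves the collection null instead of empty. A rough Kotlin analogue of the guarded and unguarded styles follows, with hypothetical types rather than the actual AmazonClusterProvider code.

// Rough Kotlin analogue of the two Groovy styles touched by this revert;
// ServerGroup and the lookup map are hypothetical stand-ins.
data class ServerGroup(val name: String)

// Like findResults { ... } ?: []  -- never null, failed lookups are dropped.
fun guarded(ids: List<String>?, byId: Map<String, ServerGroup>): List<ServerGroup> =
    ids?.mapNotNull { byId[it] } ?: emptyList()

// Like collect { ... } with no fallback -- may be null, and may contain nulls.
fun unguarded(ids: List<String>?, byId: Map<String, ServerGroup>): List<ServerGroup?>? =
    ids?.map { byId[it] }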