chore(dependencies): Upgrade Spring Boot to 2.2.1 (#4168)
* chore(dependencies): Upgrade Spring Boot to 2.2.1
scottfrederick authored and robzienert committed Dec 11, 2019
1 parent fdd8b9a commit 4981910
Showing 14 changed files with 30 additions and 27 deletions.
@@ -196,7 +196,7 @@ protected Set<String> scanMembers(String setKey, Optional<String> glob) {
while (true) {
final ScanResult<String> scanResult = client.sscan(setKey, cursor, scanParams);
matches.addAll(scanResult.getResult());
- cursor = scanResult.getStringCursor();
+ cursor = scanResult.getCursor();
if ("0".equals(cursor)) {
return matches;
}
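The change above tracks the Jedis 3.x ScanResult API pulled in with this upgrade: getStringCursor() was removed, and getCursor() now returns the string cursor directly. A minimal, self-contained sketch of the same SSCAN loop against the Jedis 3 API (class, method, key, and count values are illustrative, not taken from this repository):

import java.util.HashSet;
import java.util.Set;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;

class SscanLoopSketch {
  // Collects every member of a Redis set by following the SSCAN cursor until it wraps back to "0".
  static Set<String> scanMembers(Jedis client, String setKey) {
    Set<String> matches = new HashSet<>();
    ScanParams scanParams = new ScanParams().count(1000);
    String cursor = "0";
    while (true) {
      ScanResult<String> scanResult = client.sscan(setKey, cursor, scanParams);
      matches.addAll(scanResult.getResult());
      cursor = scanResult.getCursor(); // Jedis 3.x: replaces getStringCursor()
      if ("0".equals(cursor)) {
        return matches;
      }
    }
  }
}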
@@ -234,7 +234,7 @@ protected String allOfTypeId(String type) {
return String.join(":", prefix, type, "members");
}

- protected TypeReference getRelationshipsTypeReference() {
+ protected TypeReference<? extends Collection<String>> getRelationshipsTypeReference() {
return options.isTreatRelationshipsAsSet() ? RELATIONSHIPS_SET : RELATIONSHIPS_LIST;
}
}
@@ -39,6 +39,7 @@
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+ import redis.clients.jedis.params.SetParams;

public class ClusteredAgentScheduler extends CatsModuleAware
implements AgentScheduler<AgentLock>, Runnable {
@@ -181,9 +182,7 @@ private boolean acquireRunKey(String agentType, long timeout) {
client.set(
agentType,
nodeIdentity.getNodeIdentity(),
- SET_IF_NOT_EXIST,
- SET_EXPIRE_TIME_MILLIS,
- timeout);
+ SetParams.setParams().nx().px(timeout));
return SUCCESS_RESPONSE.equals(response);
});
}
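Similarly, Jedis 3.x drops the set(key, value, "NX", "PX", millis) overload in favor of a SetParams builder, which is why the lock-acquisition call above now passes SetParams.setParams().nx().px(timeout). A minimal sketch of the pattern; the class, method, key, and owner names below are invented for illustration:

import redis.clients.jedis.Jedis;
import redis.clients.jedis.params.SetParams;

class RunKeyLockSketch {
  private static final String SUCCESS_RESPONSE = "OK";

  // Acquires a simple lock: SET the key only if it does not exist (NX) with a millisecond TTL (PX).
  // Redis answers "OK" when the key was set; null means another holder already owns the lock.
  static boolean tryAcquire(Jedis client, String lockKey, String ownerId, long timeoutMillis) {
    String response = client.set(lockKey, ownerId, SetParams.setParams().nx().px(timeoutMillis));
    return SUCCESS_RESPONSE.equals(response);
  }
}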
@@ -28,6 +28,7 @@ import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService
import com.netflix.spinnaker.kork.jedis.JedisClientDelegate
import redis.clients.jedis.Jedis
import redis.clients.jedis.JedisPool
+ import redis.clients.jedis.params.SetParams
import spock.lang.Specification
import spock.lang.Subject

@@ -76,7 +77,7 @@ class ClusteredAgentSchedulerSpec extends Specification {
agentExecutionScheduler.runAll()

then:
- 1 * jedis.set(_ as String, _ as String, 'NX', 'PX', _ as Long) >> 'definitely not ok'
+ 1 * jedis.set(_ as String, _ as String, _ as SetParams) >> 'definitely not ok'
1 * jedis.close()
0 * _
}
@@ -88,7 +89,7 @@ class ClusteredAgentSchedulerSpec extends Specification {
agentExecutionScheduler.runAll()

then:
- 1 * jedis.set(_ as String, _ as String, 'NX', 'PX', _ as Long) >> 'OK'
+ 1 * jedis.set(_ as String, _ as String, _ as SetParams) >> 'OK'
1 * inst.executionStarted(agent)
1 * exec.executeAgent(agent)
1 * inst.executionCompleted(agent, _)
@@ -107,7 +108,7 @@ class ClusteredAgentSchedulerSpec extends Specification {
agentExecutionScheduler.runAll()

then:
- 1 * jedis.set(_ as String, _ as String, 'NX', 'PX', _ as Long) >> 'OK'
+ 1 * jedis.set(_ as String, _ as String, _ as SetParams) >> 'OK'
1 * inst.executionStarted(agent)
1 * exec.executeAgent(agent) >> { throw cause }
1 * inst.executionFailed(agent, cause)
@@ -100,7 +100,7 @@ static Map<String, Object> cacheView(Object o) {
"resource", cacheViewMapper.convertValue(o, new TypeReference<Map<String, Object>>() {}));
}

- Map<String, Collection<CacheData>> getCacheResultsFromCacheData(CacheData cacheData) {
+ Map<String, Collection<ResourceCacheData>> getCacheResultsFromCacheData(CacheData cacheData) {
try {
return cacheViewMapper.readValue(
cacheData.getAttributes().get("cacheResults").toString(),
@@ -106,7 +106,8 @@ private CacheData setCacheData(
String key = Keys.getLoadBalancerKey(account, cloudFoundryLoadBalancer);
CacheData lbCacheData = onDemandCacheDataToKeep.get(key);
if (lbCacheData != null && (long) lbCacheData.getAttributes().get("cacheTime") > start) {
- Map<String, Collection<CacheData>> cacheResults = getCacheResultsFromCacheData(lbCacheData);
+ Map<String, Collection<ResourceCacheData>> cacheResults =
+     getCacheResultsFromCacheData(lbCacheData);
onDemandCacheDataToKeep.remove(key);
return cacheResults.get(LOAD_BALANCERS.getNs()).stream().findFirst().orElse(null);
} else {
@@ -249,7 +249,8 @@ private CacheData setServerGroupCacheData(
String key = Keys.getServerGroupKey(account, serverGroup.getName(), serverGroup.getRegion());
CacheData sgCacheData = onDemandCacheDataToKeep.get(key);
if (sgCacheData != null && (long) sgCacheData.getAttributes().get("cacheTime") > start) {
- Map<String, Collection<CacheData>> cacheResults = getCacheResultsFromCacheData(sgCacheData);
+ Map<String, Collection<ResourceCacheData>> cacheResults =
+     getCacheResultsFromCacheData(sgCacheData);
onDemandCacheDataToKeep.remove(key);
return cacheResults.get(SERVER_GROUPS.getNs()).stream().findFirst().orElse(null);
} else {
@@ -173,7 +173,7 @@ private Set<String> scanMembers(String setKey) {
while (true) {
final ScanResult<String> scanResult = client.sscan(setKey, cursor, scanParams);
matches.addAll(scanResult.getResult());
- cursor = scanResult.getStringCursor();
+ cursor = scanResult.getCursor();
if ("0".equals(cursor)) {
return matches;
}
@@ -532,7 +532,7 @@ void handle_serverGroupExists() throws IOException {

CacheData cacheData =
providerCache.get(ON_DEMAND.getNs(), serverGroupKey("myservergroup-v001"));
- Map<String, Collection<CacheData>> cacheResults =
+ Map<String, Collection<DefaultCacheData>> cacheResults =
objectMapper.readValue(
(String) cacheData.getAttributes().get("cacheResults"),
new TypeReference<Map<String, Collection<DefaultCacheData>>>() {});
@@ -421,7 +421,7 @@ void handle_serverGroupExists() throws IOException {

CacheData cacheData =
providerCache.get(ON_DEMAND.getNs(), serverGroupKey("myservergroup-v001"));
- Map<String, Collection<CacheData>> cacheResults =
+ Map<String, Collection<DefaultCacheData>> cacheResults =
objectMapper.readValue(
(String) cacheData.getAttributes().get("cacheResults"),
new TypeReference<Map<String, Collection<DefaultCacheData>>>() {});
@@ -146,7 +146,7 @@ public CacheResult loadData(ProviderCache providerCache) {
"{}: On demand entry contents overwriting load data entry: {}",
getAgentType(),
onDemandResultsJson);
- Map<String, Collection<CacheData>> onDemandResults;
+ Map<String, List<DefaultCacheData>> onDemandResults;
try {
onDemandResults =
objectMapper.readValue(
@@ -171,10 +171,11 @@ }
}

protected void mergeCacheResults(
- Map<String, Collection<CacheData>> current, Map<String, Collection<CacheData>> added) {
+ Map<String, Collection<CacheData>> current,
+ Map<String, ? extends Collection<? extends CacheData>> added) {
for (String group : added.keySet()) {
Collection<CacheData> currentByGroup = current.get(group);
- Collection<CacheData> addedByGroup = added.get(group);
+ Collection<? extends CacheData> addedByGroup = added.get(group);

currentByGroup = currentByGroup == null ? new ArrayList<>() : currentByGroup;
addedByGroup = addedByGroup == null ? new ArrayList<>() : addedByGroup;
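Many of the signature changes in this commit (Collection<CacheData> becoming Collection<DefaultCacheData>, List<DefaultCacheData>, or ResourceCacheData, plus the wildcard bounds added to mergeCacheResults above) appear to follow from Jackson 2.10, which ships with Spring Boot 2.2: its TypeReference-accepting readValue overloads are now generic in T, so the deserialized type is inferred from the TypeReference's type parameter rather than collapsing to a raw collection. A small sketch of that inference, using a hypothetical payload type and class name:

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Collection;
import java.util.Map;

class TypeReferenceInferenceSketch {
  private static final ObjectMapper objectMapper = new ObjectMapper();

  // With Jackson 2.10, readValue(String, TypeReference<T>) returns T, so the target variable's
  // type must line up with the TypeReference's generic parameter instead of a raw Collection.
  static Map<String, Collection<String>> parse(String json) throws Exception {
    return objectMapper.readValue(json, new TypeReference<Map<String, Collection<String>>>() {});
  }
}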
@@ -267,7 +267,7 @@ public void deploymentEviction(int numCachingAgents) throws IOException {
onDemandResult.getOnDemandEvictions().get(DEPLOYMENT_KIND);
assertThat(deploymentEvictions).containsExactly(expectedKey);

- Collection<CacheData> remainingItems =
+ Collection<DefaultCacheData> remainingItems =
Optional.ofNullable(onDemandResult.getOnDemandEntries().get(DEPLOYMENT_KIND))
.orElse(ImmutableList.of());
// We expect that exactly one caching agent processed the request, so the entry should have been
@@ -353,7 +353,7 @@ public void storageClassEviction(int numCachingAgents) throws IOException {

// We expect that exactly one caching agent processed the request, so the entry should have been
// evicted once
- Collection<CacheData> remainingItems =
+ Collection<DefaultCacheData> remainingItems =
Optional.ofNullable(onDemandResult.getOnDemandEntries().get(STORAGE_CLASS_KIND))
.orElse(ImmutableList.of());
assertThat(remainingItems).hasSize(numCachingAgents - 1);
@@ -402,7 +402,7 @@ public void wrongNamespace(int numCachingAgents) {
private static class ProcessOnDemandResult {
Map<String, Collection<CacheData>> onDemandResults;
Map<String, Collection<String>> onDemandEvictions;
- Map<String, Collection<CacheData>> onDemandEntries;
+ Map<String, Collection<DefaultCacheData>> onDemandEntries;

ProcessOnDemandResult(
Collection<OnDemandAgent.OnDemandResult> onDemandResults,
@@ -454,7 +454,7 @@ private static ImmutableMap<String, Collection<CacheData>> extractCacheResults(
}

/** Given a collection of ProviderCache, return all on-demand entries in these caches. */
- private static ImmutableMap<String, Collection<CacheData>> extractCacheEntries(
+ private static ImmutableMap<String, Collection<DefaultCacheData>> extractCacheEntries(
Collection<ProviderCache> providerCaches) {
return providerCaches.stream()
.map(providerCache -> providerCache.getAll("onDemand"))
@@ -463,7 +463,7 @@ private static ImmutableMap<String, Collection<CacheData>> extractCacheEntries(
.map(
cacheData -> {
try {
- return objectMapper.<Map<String, Collection<CacheData>>>readValue(
+ return objectMapper.readValue(
(String) cacheData.getAttributes().get("cacheResults"),
new TypeReference<Map<String, Collection<DefaultCacheData>>>() {});
} catch (IOException e) {
@@ -539,7 +539,7 @@ private static LoadDataResult processLoadData(
@Value
private static class LoadDataResult {
Map<String, Collection<CacheData>> results;
- Map<String, Collection<CacheData>> cacheEntries;
+ Map<String, Collection<DefaultCacheData>> cacheEntries;

LoadDataResult(
Collection<CacheResult> loadDataResults, Collection<ProviderCache> providerCaches) {
@@ -35,7 +35,7 @@ class TaskMapper(
companion object {
private val log = LoggerFactory.getLogger(TaskMapper::class.java)

- private val SAGA_IDS_TYPE = object : TypeReference<Set<SagaId>>() {}
+ private val SAGA_IDS_TYPE = object : TypeReference<MutableSet<SagaId>>() {}
}

fun map(rs: ResultSet): Collection<Task> {
@@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper
import com.netflix.spinnaker.clouddriver.security.config.SecurityConfig
import com.netflix.spinnaker.kork.boot.DefaultPropertiesBuilder
import com.netflix.spinnaker.kork.configserver.ConfigServerBootstrap
- import org.springframework.boot.actuate.autoconfigure.elasticsearch.ElasticSearchJestHealthIndicatorAutoConfiguration
+ import org.springframework.boot.actuate.autoconfigure.elasticsearch.ElasticSearchJestHealthContributorAutoConfiguration
import org.springframework.boot.autoconfigure.EnableAutoConfiguration
import org.springframework.boot.autoconfigure.batch.BatchAutoConfiguration
import org.springframework.boot.autoconfigure.data.elasticsearch.ElasticsearchAutoConfiguration
@@ -56,7 +56,7 @@ import java.security.Security
GsonAutoConfiguration,
DataSourceAutoConfiguration,
ElasticsearchAutoConfiguration,
- ElasticSearchJestHealthIndicatorAutoConfiguration,
+ ElasticSearchJestHealthContributorAutoConfiguration,
JestAutoConfiguration
])
@EnableScheduling
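The import and exclusion rename above reflects Spring Boot 2.2's actuator rework, in which the *HealthIndicatorAutoConfiguration classes were renamed to *HealthContributorAutoConfiguration, so any explicit exclusion has to track the new class name. A stand-alone Java sketch of the same kind of exclusion; the application class here is hypothetical, not part of this repository:

import org.springframework.boot.SpringApplication;
import org.springframework.boot.actuate.autoconfigure.elasticsearch.ElasticSearchJestHealthContributorAutoConfiguration;
import org.springframework.boot.autoconfigure.SpringBootApplication;

// Excludes the renamed Jest health auto-configuration under Spring Boot 2.2.
@SpringBootApplication(exclude = ElasticSearchJestHealthContributorAutoConfiguration.class)
class ExclusionSketch {
  public static void main(String[] args) {
    SpringApplication.run(ExclusionSketch.class, args);
  }
}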
gradle.properties: 2 changes (1 addition, 1 deletion)
@@ -2,6 +2,6 @@
includeCloudProviders=all
fiatVersion=1.9.2
enablePublishing=false
- korkVersion=6.22.1
+ korkVersion=7.0.0
spinnakerGradleVersion=7.0.2
org.gradle.parallel=true
