From cf4619f335a136791f01d5bec2dd61665d5d1845 Mon Sep 17 00:00:00 2001 From: Asher Feldman Date: Wed, 11 Oct 2017 09:18:34 -0700 Subject: [PATCH] feat(log): annotations for json logging (#282) --- build.gradle | 2 +- .../front50/model/AzureStorageService.java | 48 +++++++++++++---- .../front50/model/StorageServiceSupport.java | 13 +++-- .../model/application/Application.groovy | 11 ++-- .../spinnaker/front50/config/GcsConfig.java | 7 ++- .../front50/model/GcsStorageService.java | 54 +++++++++++-------- .../migrations/LinearToParallelMigration.java | 7 ++- .../front50/migrations/MigrationRunner.java | 5 +- .../pipeline/PipelineRepository.groovy | 2 + .../model/EventingS3ObjectKeyLoader.java | 32 +++++------ .../front50/model/S3StorageService.java | 16 ++++-- .../front50/model/TemporarySQSQueue.java | 14 ++--- .../front50/model/SwiftStorageService.java | 6 ++- 13 files changed, 143 insertions(+), 74 deletions(-) diff --git a/build.gradle b/build.gradle index fbf761150..36cd4e6c5 100644 --- a/build.gradle +++ b/build.gradle @@ -36,7 +36,7 @@ allprojects { project -> group = "com.netflix.spinnaker.front50" ext { - spinnakerDependenciesVersion = project.hasProperty('spinnakerDependenciesVersion') ? project.property('spinnakerDependenciesVersion') : '0.109.2' + spinnakerDependenciesVersion = project.hasProperty('spinnakerDependenciesVersion') ? 
project.property('spinnakerDependenciesVersion') : '0.111.1' } def checkLocalVersions = [spinnakerDependenciesVersion: spinnakerDependenciesVersion] diff --git a/front50-azure/src/main/java/com/netflix/spinnaker/front50/model/AzureStorageService.java b/front50-azure/src/main/java/com/netflix/spinnaker/front50/model/AzureStorageService.java index fd3ee3cb2..5b6c2da69 100644 --- a/front50-azure/src/main/java/com/netflix/spinnaker/front50/model/AzureStorageService.java +++ b/front50-azure/src/main/java/com/netflix/spinnaker/front50/model/AzureStorageService.java @@ -31,6 +31,8 @@ import java.io.IOException; import java.util.*; +import static net.logstash.logback.argument.StructuredArguments.value; + public class AzureStorageService implements StorageService { private static final Logger log = LoggerFactory.getLogger(AzureStorageService.class); private String containerName; @@ -98,7 +100,10 @@ public T loadObject(ObjectType objectType, String object logStorageException(se, key); } catch (Exception e) { - log.error("Failed to retrieve {} object: {}: {}", objectType.group, key, e.getMessage()); + log.error("Failed to retrieve {} object: {}: {}", + value("group", objectType.group), + value("key", key), + value("exception", e.getMessage())); } return null; } @@ -109,13 +114,17 @@ public void deleteObject(ObjectType objectType, String objectKey) { try { CloudBlockBlob blob = getBlobContainer().getBlockBlobReference(key); if (blob.deleteIfExists(DeleteSnapshotsOption.INCLUDE_SNAPSHOTS, null, null, null)) { - log.info("{} object {} has been successfully deleted", objectType.group, key); + log.info("{} object {} has been successfully deleted", + value("group", objectType.group), + value("key", key)); } writeLastModified(objectType.group); } catch (StorageException se) { logStorageException(se, key); } catch (Exception e) { - log.error("Error encountered attempting to delete {} from storage: {}", key, e.getMessage()); + log.error("Error encountered attempting to delete {} from 
storage: {}", + value("key", key), + value("exception", e.getMessage())); } } @@ -133,14 +142,18 @@ public void storeObject(ObjectType objectType, String ob } blob.uploadFromByteArray(bytes, 0, bytes.length); writeLastModified(objectType.group); - log.info("{} object {} for has been successfully uploaded.", objectType.group, key); + log.info("{} object {} for has been successfully uploaded.", + value("group", objectType.group), + value("key", key)); } catch (StorageException se) { logStorageException(se, key); } catch (Exception e) { - log.error("Error encountered attempting to store {}: {}", key, e.getMessage()); + log.error("Error encountered attempting to store {}: {}", + value("key", key), + value("exception", e.getMessage())); } } @@ -163,7 +176,9 @@ public Map listObjectKeys(ObjectType objectType) { } catch (StorageException se) { logStorageException(se, ""); } catch (Exception e) { - log.error("Failed to retrieve objects from {}: {}", objectType.group, e.getMessage()); + log.error("Failed to retrieve objects from {}: {}", + value("group", objectType.group), + value("exception", e.getMessage())); } return objectKeys; } @@ -191,7 +206,9 @@ public Collection listObjectVersions(ObjectType objec } catch (StorageException se) { logStorageException(se,fullKey); } catch (Exception e) { - log.error("Error retrieving versions for {} object: {}", fullKey, e.getMessage()); + log.error("Error retrieving versions for {} object: {}", + value("key", fullKey), + value("exception", e.getMessage())); } return results; } @@ -207,7 +224,9 @@ public long getLastModified(ObjectType objectType) { } catch (StorageException se) { logStorageException(se, ""); } catch (Exception e) { - log.error("Exception occurred retrieving last modifed for {}: {}", objectType.group, e.getMessage()); + log.error("Exception occurred retrieving last modifed for {}: {}", + value("group", objectType.group), + value("exception", e.getMessage())); } return 0; } @@ -225,7 +244,7 @@ private void 
writeLastModified(String group) { logStorageException(se, ""); } catch (Exception e) { - log.error("Exception occurred setting last modified date/time: {} ", e.getMessage()); + log.error("Exception occurred setting last modified date/time: {} ", value("exception", e.getMessage())); } } @@ -242,9 +261,16 @@ private void logStorageException(StorageException storageException, String key) String errorMsg = storageException.getExtendedErrorInformation().getErrorMessage(); String errorCode = storageException.getExtendedErrorInformation().getErrorCode(); if (key.isEmpty()) { - log.error("Exception occurred accessing object(s) from storage: HTTPStatusCode {} ErrorCode: {} {}", storageException.getHttpStatusCode(), errorCode, errorMsg); + log.error("Exception occurred accessing object(s) from storage: HTTPStatusCode {} ErrorCode: {} {}", + value("responseStatus", storageException.getHttpStatusCode()), + value("errorCode", errorCode), + value("errorMsg", errorMsg)); } else { - log.error("Exception occurred accessing object(s) from storage: Key {} HTTPStatusCode {} ErrorCode: {} {}", key, storageException.getHttpStatusCode(), errorCode, errorMsg); + log.error("Exception occurred accessing object(s) from storage: Key {} HTTPStatusCode {} ErrorCode: {} {}", + value("key", key), + value("responseStatus", storageException.getHttpStatusCode()), + value("errorCode", errorCode), + value("errorMsg", errorMsg)); } } diff --git a/front50-core/src/main/groovy/com/netflix/spinnaker/front50/model/StorageServiceSupport.java b/front50-core/src/main/groovy/com/netflix/spinnaker/front50/model/StorageServiceSupport.java index e7423c8fe..7c43d352c 100644 --- a/front50-core/src/main/groovy/com/netflix/spinnaker/front50/model/StorageServiceSupport.java +++ b/front50-core/src/main/groovy/com/netflix/spinnaker/front50/model/StorageServiceSupport.java @@ -36,6 +36,7 @@ import java.util.function.ToDoubleFunction; import java.util.stream.Collectors; +import static 
net.logstash.logback.argument.StructuredArguments.value; public abstract class StorageServiceSupport { private static final long HEALTH_MILLIS = TimeUnit.SECONDS.toMillis(90); @@ -145,7 +146,9 @@ public Collection all(boolean refresh) { long lastModified = readLastModified(); if (lastModified > lastRefreshedTime.get() || allItemsCache.get() == null) { // only refresh if there was a modification since our last refresh cycle - log.debug("all() forcing refresh (lastModified: {}, lastRefreshed: {})", new Date(lastModified), new Date(lastRefreshedTime.get())); + log.debug("all() forcing refresh (lastModified: {}, lastRefreshed: {})", + value("lastModified", new Date(lastModified)), + value("lastRefreshed", new Date(lastRefreshedTime.get()))); long startTime = System.nanoTime(); refresh(); long elapsed = System.nanoTime() - startTime; @@ -273,7 +276,7 @@ private Set fetchAllItems(Set existingItems) { for (T item : existingItems) { if (keyUpdateTime.containsKey(buildObjectKey(item))) { if (resultMap.containsKey(item.getId())) { - log.error(String.format("Duplicate item id found, last-write wins: (id: %s)", item.getId())); + log.error("Duplicate item id found, last-write wins: (id: {})", value("id", item.getId())); } resultMap.put(item.getId(), item); } @@ -299,7 +302,7 @@ private Set fetchAllItems(Set existingItems) { if (!existingItems.isEmpty() && !modifiedKeys.isEmpty()) { // only log keys that have been modified after initial cache load - log.debug("Modified object keys: {}", modifiedKeys); + log.debug("Modified object keys: {}", value("keys", modifiedKeys)); } Observable @@ -336,7 +339,9 @@ private Set fetchAllItems(Set existingItems) { removeCounter.increment(existingSize + numAdded.get() - resultSize); if (existingSize != resultSize) { log.info("{}={} delta={}", - objectType.group, resultSize, resultSize - existingSize); + value("objectType", objectType.group), + value("resultSize", resultSize), + value("delta", resultSize - existingSize)); } return result; diff 
--git a/front50-core/src/main/groovy/com/netflix/spinnaker/front50/model/application/Application.groovy b/front50-core/src/main/groovy/com/netflix/spinnaker/front50/model/application/Application.groovy index d2de4f24c..89d0082b5 100644 --- a/front50-core/src/main/groovy/com/netflix/spinnaker/front50/model/application/Application.groovy +++ b/front50-core/src/main/groovy/com/netflix/spinnaker/front50/model/application/Application.groovy @@ -42,6 +42,8 @@ import groovy.transform.stc.SimpleType import groovy.util.logging.Slf4j import org.springframework.validation.Errors +import static net.logstash.logback.argument.StructuredArguments.value + @ToString @Slf4j class Application implements Timestamped { @@ -146,7 +148,7 @@ class Application implements Timestamped { } if (!currentApplication) { - log.warn("Application does not exist (name: ${name}), nothing to delete") + log.warn("Application does not exist (name: {}), nothing to delete", value("application", name)) return } @@ -326,7 +328,8 @@ class Application implements Timestamped { log.error("Rollback failed (onRollback)", rollbackException) } - log.error("Failed to perform action (name: ${originalApplication?.name ?: updatedApplication?.name})") + log.error("Failed to perform action (name: {})", + value("application", originalApplication?.name ?: updatedApplication?.name)) throw e } } @@ -371,8 +374,8 @@ class Application implements Timestamped { @JsonSetter void setRequiredGroupMembership(List requiredGroupMembership) { - log.warn("Required group membership settings detected in application ${name}. 
" + - "Please update to `permissions` format.") + log.warn("Required group membership settings detected in application {} " + + "Please update to `permissions` format.", value("application", name)) if (!permissions.isRestricted()) { // Do not overwrite permissions if it contains values Permissions.Builder b = new Permissions.Builder() diff --git a/front50-gcs/src/main/java/com/netflix/spinnaker/front50/config/GcsConfig.java b/front50-gcs/src/main/java/com/netflix/spinnaker/front50/config/GcsConfig.java index bb07b0985..0783e2d85 100644 --- a/front50-gcs/src/main/java/com/netflix/spinnaker/front50/config/GcsConfig.java +++ b/front50-gcs/src/main/java/com/netflix/spinnaker/front50/config/GcsConfig.java @@ -35,6 +35,8 @@ import java.util.Optional; import java.util.concurrent.Executors; +import static net.logstash.logback.argument.StructuredArguments.value; + @Configuration @ConditionalOnExpression("${spinnaker.gcs.enabled:false}") @EnableConfigurationProperties(GcsProperties.class) @@ -95,9 +97,10 @@ private GcsStorageService googleCloudStorageService(String dataFilename, GcsProp } service.ensureBucketExists(); log.info("Using Google Cloud Storage bucket={} in project={}", - gcsProperties.getBucket(), gcsProperties.getProject()); + value("bucket", gcsProperties.getBucket()), + value("project", gcsProperties.getProject())); log.info("Bucket versioning is {}.", - service.supportsVersioning() ? "enabled" : "DISABLED"); + value("versioning", service.supportsVersioning() ? 
"enabled" : "DISABLED")); return service; } diff --git a/front50-gcs/src/main/java/com/netflix/spinnaker/front50/model/GcsStorageService.java b/front50-gcs/src/main/java/com/netflix/spinnaker/front50/model/GcsStorageService.java index 0b17da20d..a0d5b7a46 100644 --- a/front50-gcs/src/main/java/com/netflix/spinnaker/front50/model/GcsStorageService.java +++ b/front50-gcs/src/main/java/com/netflix/spinnaker/front50/model/GcsStorageService.java @@ -60,6 +60,8 @@ import java.util.List; import java.util.Map; +import static net.logstash.logback.argument.StructuredArguments.value; + public class GcsStorageService implements StorageService { private static final String DEFAULT_DATA_FILENAME = "specification.json"; @@ -104,7 +106,7 @@ private GoogleCredential loadCredential( FileInputStream stream = new FileInputStream(jsonPath); credential = GoogleCredential.fromStream(stream, transport, factory) .createScoped(Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL)); - log.info("Loaded credentials from " + jsonPath); + log.info("Loaded credentials from {}", value("jsonPath", jsonPath)); } else { log.info("spinnaker.gcs.enabled without spinnaker.gcs.jsonPath. " + "Using default application credentials. Using default credentials."); @@ -243,7 +245,8 @@ public void ensureBucketExists() { } catch (HttpResponseException e) { if (e.getStatusCode() == 404) { log.warn("Bucket {} does not exist. 
Creating it in project={}", - bucketName, projectName); + value("bucket", bucketName), + value("project", projectName)); Bucket.Versioning versioning = new Bucket.Versioning().setEnabled(true); Bucket bucket = new Bucket().setName(bucketName).setVersioning(versioning); if (StringUtils.isNotBlank(bucketLocation)) { @@ -253,15 +256,17 @@ public void ensureBucketExists() { storage.buckets().insert(projectName, bucket).execute(); } catch (IOException e2) { log.error("Could not create bucket={} in project={}: {}", - bucketName, projectName, e2); + value("bucket", bucketName), + value("project", projectName), + e2); throw new IllegalStateException(e2); } } else { - log.error("Could not get bucket={}: {}", bucketName, e); + log.error("Could not get bucket={}: {}", value("bucket", bucketName), e); throw new IllegalStateException(e); } } catch (IOException e) { - log.error("Could not get bucket={}: {}", bucketName, e); + log.error("Could not get bucket={}: {}", value("bucket", bucketName), e); throw new IllegalStateException(e); } } @@ -295,13 +300,16 @@ public Object doCall() throws Exception { T item = deserialize(storageObjectHolder[0], (Class) objectType.clazz, true); item.setLastModified(storageObjectHolder[0].getUpdated().getValue()); - log.debug("Loaded bucket={} path={}", bucketName, path); + log.debug("Loaded bucket={} path={}", value("bucket", bucketName), value("path", path)); return item; } catch (IOException e) { if (e instanceof HttpResponseException) { HttpResponseException hre = (HttpResponseException)e; log.error("Failed to load {} {}: {} {}", - objectType.group, objectKey, hre.getStatusCode(), hre.getStatusMessage()); + value("group", objectType.group), + value("key", objectKey), + value("responseStatus", hre.getStatusCode()), + value("errorMsg", hre.getStatusMessage())); if (hre.getStatusCode() == 404) { throw new NotFoundException(String.format("No file at path=%s", path)); } @@ -315,7 +323,7 @@ public void deleteObject(ObjectType objectType, String 
objectKey) { String path = keyToPath(objectKey, objectType.group); try { timeExecute(deleteTimer, obj_api.delete(bucketName, path)); - log.info("Deleted {} '{}'", objectType.group, objectKey); + log.info("Deleted {} '{}'", value("group", objectType.group), value("key", objectKey)); writeLastModified(objectType.group); } catch (HttpResponseException e) { if (e.getStatusCode() == 404) { @@ -323,7 +331,7 @@ public void deleteObject(ObjectType objectType, String objectKey) { } throw new IllegalStateException(e); } catch (IOException ioex) { - log.error("Failed to delete path={}: {}", path, ioex); + log.error("Failed to delete path={}", value("path", path), ioex); throw new IllegalStateException(ioex); } } @@ -339,9 +347,9 @@ public void storeObject(ObjectType objectType, String ob ByteArrayContent content = new ByteArrayContent("application/json", bytes); timeExecute(insertTimer, obj_api.insert(bucketName, object, content)); writeLastModified(objectType.group); - log.info("Wrote {} '{}'", objectType.group, objectKey); + log.info("Wrote {} '{}'", value("group", objectType.group), value("key", objectKey)); } catch (IOException e) { - log.error("Update failed on path={}: {}", path, e); + log.error("Update failed on path={}: {}", value("path", path), e); throw new IllegalStateException(e); } } @@ -461,10 +469,12 @@ public Void call() throws Exception { return objectMapper.readValue(json, clas); } catch (Exception ex) { if (current_version) { - log.error("Error reading {}: {}", object.getName(), ex); + log.error("Error reading {}: {}", value("object", object.getName()), ex); } else { log.error("Error reading {} generation={}: {}", - object.getName(), object.getGeneration(), ex); + value("object", object.getName()), + value("generation", object.getGeneration()), + ex); } return null; } @@ -485,17 +495,17 @@ private void writeLastModified(String daoTypeName) { ByteArrayContent content = new ByteArrayContent("application/json", bytes); try { - log.info("Attempting to add {}", 
timestamp_path); + log.info("Attempting to add {}", value("path", timestamp_path)); timeExecute(insertTimer, obj_api.insert(bucketName, object, content)); } catch (IOException ioex) { log.error("writeLastModified failed to update {}\n{}", - timestamp_path, e.toString()); - log.error("writeLastModified insert failed too: {}", ioex); + value("path", timestamp_path), e.toString()); + log.error("writeLastModified insert failed too", ioex); throw new IllegalStateException(e); } } else { log.error("writeLastModified failed to update {}\n{}", - timestamp_path, e.toString()); + value("path", timestamp_path), value("exception", e.toString())); throw new IllegalStateException(e); } } catch (IOException e) { @@ -510,7 +520,7 @@ private void writeLastModified(String daoTypeName) { purgeOldVersions(timestamp_path); } catch (Exception e) { log.warn("Failed to purge old versions of {}. Ignoring error.", - timestamp_path); + value("path", timestamp_path)); } } @@ -542,7 +552,7 @@ private void purgeOldVersions(String path) throws Exception { if (generation == generations.get(0)) { continue; } - log.debug("Remove {} generation {}", path, generation); + log.debug("Remove {} generation {}", value("path", path), value("generation", generation)); timeExecute(purgeTimer, obj_api.delete(bucketName, path).setGeneration(generation)); } } @@ -566,14 +576,14 @@ public Object doCall() throws Exception { HttpResponseException hre = (HttpResponseException)e; long now = System.currentTimeMillis(); if (hre.getStatusCode() == 404) { - log.info("No timestamp file at {}. Creating a new one.", path); + log.info("No timestamp file at {}. 
Creating a new one.", value("path", path)); writeLastModified(objectType.group); return now; } - log.error("Error writing timestamp file {}", e.toString()); + log.error("Error writing timestamp file", e); return now; } else { - log.error("Error accessing timestamp file {}", e.toString()); + log.error("Error accessing timestamp file", e); return System.currentTimeMillis(); } } diff --git a/front50-migrations/src/main/java/com/netflix/spinnaker/front50/migrations/LinearToParallelMigration.java b/front50-migrations/src/main/java/com/netflix/spinnaker/front50/migrations/LinearToParallelMigration.java index da2530161..d0920294c 100644 --- a/front50-migrations/src/main/java/com/netflix/spinnaker/front50/migrations/LinearToParallelMigration.java +++ b/front50-migrations/src/main/java/com/netflix/spinnaker/front50/migrations/LinearToParallelMigration.java @@ -30,6 +30,7 @@ import java.util.concurrent.atomic.AtomicInteger; import static java.lang.String.format; +import static net.logstash.logback.argument.StructuredArguments.value; @Component public class LinearToParallelMigration implements Migration { @@ -68,7 +69,8 @@ public void run() { } private void migrate(ItemDAO dao, String type, Pipeline pipeline) { - log.info(format("Migrating %s '%s' from linear -> parallel", type, pipeline.getId())); + log.info("Migrating {} '{}' from linear -> parallel", + value("type", type), value("id", pipeline.getId())); AtomicInteger refId = new AtomicInteger(0); List> stages = (List>) pipeline.getOrDefault("stages", Collections.emptyList()); @@ -86,7 +88,8 @@ private void migrate(ItemDAO dao, String type, Pipeline pipeline) { pipeline.put("parallel", true); dao.update(pipeline.getId(), pipeline); - log.info(format("Migrated %s '%s' from linear -> parallel", type, pipeline.getId())); + log.info("Migrated {} '{}' from linear -> parallel", + value("type", type), value("id", pipeline.getId())); } } diff --git 
a/front50-migrations/src/main/java/com/netflix/spinnaker/front50/migrations/MigrationRunner.java b/front50-migrations/src/main/java/com/netflix/spinnaker/front50/migrations/MigrationRunner.java index b633cf6df..1e21517f9 100644 --- a/front50-migrations/src/main/java/com/netflix/spinnaker/front50/migrations/MigrationRunner.java +++ b/front50-migrations/src/main/java/com/netflix/spinnaker/front50/migrations/MigrationRunner.java @@ -25,6 +25,7 @@ import java.util.Collection; import static java.lang.String.format; +import static net.logstash.logback.argument.StructuredArguments.value; @Component public class MigrationRunner { @@ -44,7 +45,9 @@ void run() { try { migration.run(); } catch (Exception e) { - log.error(format("Migration failure (%s)", migration.getClass().getSimpleName()), e); + log.error("Migration failure ({})", + value("class", migration.getClass().getSimpleName()), + e); } }); } diff --git a/front50-pipelines/src/main/groovy/com/netflix/spinnaker/front50/pipeline/PipelineRepository.groovy b/front50-pipelines/src/main/groovy/com/netflix/spinnaker/front50/pipeline/PipelineRepository.groovy index eabe69383..b97491ac2 100644 --- a/front50-pipelines/src/main/groovy/com/netflix/spinnaker/front50/pipeline/PipelineRepository.groovy +++ b/front50-pipelines/src/main/groovy/com/netflix/spinnaker/front50/pipeline/PipelineRepository.groovy @@ -35,6 +35,8 @@ import org.springframework.stereotype.Repository import javax.annotation.PostConstruct +import static net.logstash.logback.argument.StructuredArguments.value; /** * Repository for presets */ diff --git a/front50-s3/src/main/java/com/netflix/spinnaker/front50/model/EventingS3ObjectKeyLoader.java b/front50-s3/src/main/java/com/netflix/spinnaker/front50/model/EventingS3ObjectKeyLoader.java index 779a3daab..5c54455e7 100644 --- a/front50-s3/src/main/java/com/netflix/spinnaker/front50/model/EventingS3ObjectKeyLoader.java +++ 
b/front50-s3/src/main/java/com/netflix/spinnaker/front50/model/EventingS3ObjectKeyLoader.java @@ -47,6 +47,8 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import static net.logstash.logback.argument.StructuredArguments.value; + /** * An ObjectKeyLoader is responsible for returning a last modified timestamp for all objects of a particular type. * @@ -102,7 +104,7 @@ public EventingS3ObjectKeyLoader(ExecutorService executionService, new CacheLoader>() { @Override public Map load(ObjectType objectType) throws Exception { - log.debug("Loading object keys for {}", objectType); + log.debug("Loading object keys for {}", value("type", objectType)); return s3StorageService.listObjectKeys(objectType); } @@ -110,7 +112,7 @@ public Map load(ObjectType objectType) throws Exception { public ListenableFuture> reload(ObjectType objectType, Map previous) throws Exception { ListenableFutureTask> task = ListenableFutureTask.create( () -> { - log.debug("Refreshing object keys for {} (asynchronous)", objectType); + log.debug("Refreshing object keys for {} (asynchronous)", value("type", objectType)); return s3StorageService.listObjectKeys(objectType); } ); @@ -150,19 +152,19 @@ public Map listObjectKeys(ObjectType objectType) { if (currentLastModifiedTime > previousLastModifiedTime) { log.info( "Detected Recent Modification (type: {}, key: {}, previous: {}, current: {})", - objectType, - key, - new Date(previousLastModifiedTime), - new Date(e.getValue()) + value("type", objectType), + value("key", key), + value("previousTime", new Date(previousLastModifiedTime)), + value("currentTime", new Date(e.getValue())) ); objectKeys.put(key, currentLastModifiedTime); } } else { log.info( "Detected Recent Modification (type: {}, key: {}, current: {})", - objectType, - key, - new Date(e.getValue()) + value("type", objectType), + value("key", key), + value("currentTime", new Date(e.getValue())) ); objectKeys.put(key, e.getValue()); } @@ -210,10 +212,10 @@ private 
void tick(S3Event s3Event) { log.debug( "Received Event (objectType: {}, type: {}, key: {}, delta: {})", - keyWithObjectType.objectType, - eventType, - keyWithObjectType.key, - System.currentTimeMillis() - eventTime.getMillis() + value("objectType", keyWithObjectType.objectType), + value("type", eventType), + value("key", keyWithObjectType.key), + value("delta", System.currentTimeMillis() - eventTime.getMillis()) ); objectKeysByLastModifiedCache.put(keyWithObjectType, eventTime.getMillis()); @@ -250,14 +252,14 @@ private static S3Event unmarshall(ObjectMapper objectMapper, String messageBody) try { s3EventWrapper = objectMapper.readValue(messageBody, S3EventWrapper.class); } catch (IOException e) { - log.debug("Unable unmarshal S3EventWrapper (body: {})", messageBody, e); + log.debug("Unable unmarshal S3EventWrapper (body: {})", value("message", messageBody), e); return null; } try { return objectMapper.readValue(s3EventWrapper.message, S3Event.class); } catch (IOException e) { - log.debug("Unable unmarshal S3Event (body: {})", s3EventWrapper.message, e); + log.debug("Unable unmarshal S3Event (body: {})", value("body", s3EventWrapper.message), e); return null; } } diff --git a/front50-s3/src/main/java/com/netflix/spinnaker/front50/model/S3StorageService.java b/front50-s3/src/main/java/com/netflix/spinnaker/front50/model/S3StorageService.java index 91d61b6d8..ad21f152c 100644 --- a/front50-s3/src/main/java/com/netflix/spinnaker/front50/model/S3StorageService.java +++ b/front50-s3/src/main/java/com/netflix/spinnaker/front50/model/S3StorageService.java @@ -34,6 +34,8 @@ import java.util.*; import java.util.stream.Collectors; +import static net.logstash.logback.argument.StructuredArguments.value; + public class S3StorageService implements StorageService { private static final Logger log = LoggerFactory.getLogger(S3StorageService.class); @@ -69,15 +71,18 @@ public void ensureBucketExists() { } catch (AmazonServiceException e) { if (e.getStatusCode() == 404) { if 
(StringUtils.isNullOrEmpty(region)) { - log.info("Creating bucket " + bucket + " in default region"); + log.info("Creating bucket {} in default region", value("bucket", bucket)); amazonS3.createBucket(bucket); } else { - log.info("Creating bucket " + bucket + " in region " + region + "..."); + log.info("Creating bucket {} in region {}", + value("bucket", bucket), + value("region", region) + ); amazonS3.createBucket(bucket, region); } if (versioning) { - log.info("Enabling versioning of the S3 bucket " + bucket); + log.info("Enabling versioning of the S3 bucket {}", value("bucket", bucket)); BucketVersioningConfiguration configuration = new BucketVersioningConfiguration().withStatus("Enabled"); @@ -162,7 +167,10 @@ public Map listObjectKeys(ObjectType objectType) { summaries.addAll(bucketListing.getObjectSummaries()); } - log.debug("Took {}ms to fetch {} object keys for {}", (System.currentTimeMillis() - startTime), summaries.size(), objectType); + log.debug("Took {}ms to fetch {} object keys for {}", + value("fetchTime", (System.currentTimeMillis() - startTime)), + summaries.size(), + value("type", objectType)); return summaries .stream() diff --git a/front50-s3/src/main/java/com/netflix/spinnaker/front50/model/TemporarySQSQueue.java b/front50-s3/src/main/java/com/netflix/spinnaker/front50/model/TemporarySQSQueue.java index e0b3bbf23..03b79a715 100644 --- a/front50-s3/src/main/java/com/netflix/spinnaker/front50/model/TemporarySQSQueue.java +++ b/front50-s3/src/main/java/com/netflix/spinnaker/front50/model/TemporarySQSQueue.java @@ -39,6 +39,8 @@ import java.util.HashMap; import java.util.List; +import static net.logstash.logback.argument.StructuredArguments.value; + /** * Encapsulates the lifecycle of a temporary queue. 
* @@ -78,18 +80,18 @@ void markMessageAsHandled(String receiptHandle) { try { amazonSQS.deleteMessage(temporaryQueue.sqsQueueUrl, receiptHandle); } catch (ReceiptHandleIsInvalidException e) { - log.warn("Error deleting message, reason: {} (receiptHandle: {})", e.getMessage(), receiptHandle); + log.warn("Error deleting message, reason: {} (receiptHandle: {})", e.getMessage(), value("receiptHandle", receiptHandle)); } } @PreDestroy void shutdown() { try { - log.debug("Removing Temporary S3 Notification Queue: {}", temporaryQueue.sqsQueueUrl); + log.debug("Removing Temporary S3 Notification Queue: {}", value("queue", temporaryQueue.sqsQueueUrl)); amazonSQS.deleteQueue(temporaryQueue.sqsQueueUrl); - log.debug("Removed Temporary S3 Notification Queue: {}", temporaryQueue.sqsQueueUrl); + log.debug("Removed Temporary S3 Notification Queue: {}", value("queue", temporaryQueue.sqsQueueUrl)); } catch (Exception e) { - log.error("Unable to remove queue: {} (reason: {})", temporaryQueue.sqsQueueUrl, e.getMessage(), e); + log.error("Unable to remove queue: {} (reason: {})", value("queue", temporaryQueue.sqsQueueUrl), e.getMessage(), e); } try { @@ -97,7 +99,7 @@ void shutdown() { amazonSNS.unsubscribe(temporaryQueue.snsTopicSubscriptionArn); log.debug("Removed S3 Notification Subscription: {}", temporaryQueue.snsTopicSubscriptionArn); } catch (Exception e) { - log.error("Unable to unsubscribe queue from topic: {} (reason: {})", temporaryQueue.snsTopicSubscriptionArn, e.getMessage(), e); + log.error("Unable to unsubscribe queue from topic: {} (reason: {})", value("topic", temporaryQueue.snsTopicSubscriptionArn), e.getMessage(), e); } } @@ -126,7 +128,7 @@ private TemporaryQueue createQueue(String snsTopicArn, String sqsQueueArn, Strin .withQueueName(sqsQueueName) .withAttributes(Collections.singletonMap("MessageRetentionPeriod", "60")) // 60s message retention ).getQueueUrl(); - log.info("Created Temporary S3 Notification Queue: {}", sqsQueueUrl); + log.info("Created Temporary S3 
Notification Queue: {}", value("queue", sqsQueueUrl)); String snsTopicSubscriptionArn = amazonSNS.subscribe(snsTopicArn, "sqs", sqsQueueArn).getSubscriptionArn(); diff --git a/front50-swift/src/main/java/com/netflix/spinnaker/front50/model/SwiftStorageService.java b/front50-swift/src/main/java/com/netflix/spinnaker/front50/model/SwiftStorageService.java index 88d35494d..1ebbf9442 100644 --- a/front50-swift/src/main/java/com/netflix/spinnaker/front50/model/SwiftStorageService.java +++ b/front50-swift/src/main/java/com/netflix/spinnaker/front50/model/SwiftStorageService.java @@ -47,6 +47,8 @@ import java.util.Map; import java.util.stream.Collectors; +import static net.logstash.logback.argument.StructuredArguments.value; + public class SwiftStorageService implements StorageService { private static final Logger log = LoggerFactory.getLogger(SwiftStorageService.class); @@ -142,7 +144,7 @@ public void storeObject(ObjectType objectType, String ob getSwift().objects().put(containerName, objectKey, Payloads.create(is), ObjectPutOptions.create().path(objectType.group)); } catch (IOException e) { - log.error("failed to write object={}: {}", objectKey, e); + log.error("failed to write object={}: {}", value("key", objectKey), e); throw new IllegalStateException(e); } } @@ -186,7 +188,7 @@ private T deserialize(SwiftObject object, Class clazz item.setLastModified(object.getLastModified().getTime()); return item; } catch (Exception e) { - log.error("Error reading {}: {}", object.getName(), e); + log.error("Error reading {}: {}", value("key", object.getName()), e); } return null; }