diff --git a/CHANGELOG.md b/CHANGELOG.md index a40a5e77618..936bd3fe290 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## Unreleased + +### Fixes + +- Reduce excessive CPU usage when serializing breadcrumbs to disk for ANRs ([#4181](https://github.com/getsentry/sentry-java/pull/4181)) + ## 7.22.2 ### Fixes diff --git a/buildSrc/src/main/java/Config.kt b/buildSrc/src/main/java/Config.kt index 8f7f495e27f..26c6e9775b1 100644 --- a/buildSrc/src/main/java/Config.kt +++ b/buildSrc/src/main/java/Config.kt @@ -202,6 +202,7 @@ object Config { val msgpack = "org.msgpack:msgpack-core:0.9.8" val leakCanaryInstrumentation = "com.squareup.leakcanary:leakcanary-android-instrumentation:2.14" val composeUiTestJunit4 = "androidx.compose.ui:ui-test-junit4:$composeVersion" + val okio = "com.squareup.okio:okio:1.13.0" } object QualityPlugins { diff --git a/sentry-android-core/src/main/java/io/sentry/android/core/AndroidOptionsInitializer.java b/sentry-android-core/src/main/java/io/sentry/android/core/AndroidOptionsInitializer.java index 80d81671ee5..5378f93110b 100644 --- a/sentry-android-core/src/main/java/io/sentry/android/core/AndroidOptionsInitializer.java +++ b/sentry-android-core/src/main/java/io/sentry/android/core/AndroidOptionsInitializer.java @@ -143,6 +143,11 @@ static void initializeIntegrationsAndProcessors( new AndroidConnectionStatusProvider(context, options.getLogger(), buildInfoProvider)); } + if (options.getCacheDirPath() != null) { + options.addScopeObserver(new PersistingScopeObserver(options)); + options.addOptionsObserver(new PersistingOptionsObserver(options)); + } + options.addEventProcessor(new DeduplicateMultithreadedEventProcessor(options)); options.addEventProcessor( new DefaultAndroidEventProcessor(context, buildInfoProvider, options)); @@ -221,13 +226,6 @@ static void initializeIntegrationsAndProcessors( } } options.setTransactionPerformanceCollector(new DefaultTransactionPerformanceCollector(options)); - - if (options.getCacheDirPath() != null) { - if (options.isEnableScopePersistence()) { - options.addScopeObserver(new PersistingScopeObserver(options)); - } - options.addOptionsObserver(new PersistingOptionsObserver(options)); - } } static void installDefaultIntegrations( @@ -273,6 +271,8 @@ static void installDefaultIntegrations( // AppLifecycleIntegration has to be installed before AnrIntegration, because AnrIntegration // relies on AppState set by it options.addIntegration(new AppLifecycleIntegration()); + // AnrIntegration must be installed before ReplayIntegration, as ReplayIntegration relies on + // it to set the replayId in case of an ANR options.addIntegration(AnrIntegrationFactory.create(context, buildInfoProvider)); // registerActivityLifecycleCallbacks is only available if Context is an AppContext diff --git a/sentry-android-core/src/main/java/io/sentry/android/core/AnrV2EventProcessor.java b/sentry-android-core/src/main/java/io/sentry/android/core/AnrV2EventProcessor.java index 990facd6244..0f07151d2fc 100644 --- a/sentry-android-core/src/main/java/io/sentry/android/core/AnrV2EventProcessor.java +++ b/sentry-android-core/src/main/java/io/sentry/android/core/AnrV2EventProcessor.java @@ -33,6 +33,7 @@ import io.sentry.SentryEvent; import io.sentry.SentryExceptionFactory; import io.sentry.SentryLevel; +import io.sentry.SentryOptions; import io.sentry.SentryStackTraceFactory; import io.sentry.SpanContext; import io.sentry.android.core.internal.util.CpuInfoUtils; @@ -83,6 +84,8 @@ public final class AnrV2EventProcessor implements 
BackfillingEventProcessor { private final @NotNull SentryExceptionFactory sentryExceptionFactory; + private final @Nullable PersistingScopeObserver persistingScopeObserver; + public AnrV2EventProcessor( final @NotNull Context context, final @NotNull SentryAndroidOptions options, @@ -90,6 +93,7 @@ public AnrV2EventProcessor( this.context = ContextUtils.getApplicationContext(context); this.options = options; this.buildInfoProvider = buildInfoProvider; + this.persistingScopeObserver = options.findPersistingScopeObserver(); final SentryStackTraceFactory sentryStackTraceFactory = new SentryStackTraceFactory(this.options); @@ -188,8 +192,7 @@ private boolean sampleReplay(final @NotNull SentryEvent event) { } private void setReplayId(final @NotNull SentryEvent event) { - @Nullable - String persistedReplayId = PersistingScopeObserver.read(options, REPLAY_FILENAME, String.class); + @Nullable String persistedReplayId = readFromDisk(options, REPLAY_FILENAME, String.class); final @NotNull File replayFolder = new File(options.getCacheDirPath(), "replay_" + persistedReplayId); if (!replayFolder.exists()) { @@ -224,8 +227,7 @@ private void setReplayId(final @NotNull SentryEvent event) { } private void setTrace(final @NotNull SentryEvent event) { - final SpanContext spanContext = - PersistingScopeObserver.read(options, TRACE_FILENAME, SpanContext.class); + final SpanContext spanContext = readFromDisk(options, TRACE_FILENAME, SpanContext.class); if (event.getContexts().getTrace() == null) { if (spanContext != null && spanContext.getSpanId() != null @@ -236,8 +238,7 @@ private void setTrace(final @NotNull SentryEvent event) { } private void setLevel(final @NotNull SentryEvent event) { - final SentryLevel level = - PersistingScopeObserver.read(options, LEVEL_FILENAME, SentryLevel.class); + final SentryLevel level = readFromDisk(options, LEVEL_FILENAME, SentryLevel.class); if (event.getLevel() == null) { event.setLevel(level); } @@ -246,7 +247,7 @@ private void setLevel(final @NotNull SentryEvent event) { @SuppressWarnings("unchecked") private void setFingerprints(final @NotNull SentryEvent event, final @NotNull Object hint) { final List fingerprint = - (List) PersistingScopeObserver.read(options, FINGERPRINT_FILENAME, List.class); + (List) readFromDisk(options, FINGERPRINT_FILENAME, List.class); if (event.getFingerprints() == null) { event.setFingerprints(fingerprint); } @@ -262,16 +263,14 @@ private void setFingerprints(final @NotNull SentryEvent event, final @NotNull Ob } private void setTransaction(final @NotNull SentryEvent event) { - final String transaction = - PersistingScopeObserver.read(options, TRANSACTION_FILENAME, String.class); + final String transaction = readFromDisk(options, TRANSACTION_FILENAME, String.class); if (event.getTransaction() == null) { event.setTransaction(transaction); } } private void setContexts(final @NotNull SentryBaseEvent event) { - final Contexts persistedContexts = - PersistingScopeObserver.read(options, CONTEXTS_FILENAME, Contexts.class); + final Contexts persistedContexts = readFromDisk(options, CONTEXTS_FILENAME, Contexts.class); if (persistedContexts == null) { return; } @@ -291,7 +290,7 @@ private void setContexts(final @NotNull SentryBaseEvent event) { @SuppressWarnings("unchecked") private void setExtras(final @NotNull SentryBaseEvent event) { final Map extras = - (Map) PersistingScopeObserver.read(options, EXTRAS_FILENAME, Map.class); + (Map) readFromDisk(options, EXTRAS_FILENAME, Map.class); if (extras == null) { return; } @@ -309,14 +308,12 @@ private void 
setExtras(final @NotNull SentryBaseEvent event) { @SuppressWarnings("unchecked") private void setBreadcrumbs(final @NotNull SentryBaseEvent event) { final List breadcrumbs = - (List) - PersistingScopeObserver.read( - options, BREADCRUMBS_FILENAME, List.class, new Breadcrumb.Deserializer()); + (List) readFromDisk(options, BREADCRUMBS_FILENAME, List.class); if (breadcrumbs == null) { return; } if (event.getBreadcrumbs() == null) { - event.setBreadcrumbs(new ArrayList<>(breadcrumbs)); + event.setBreadcrumbs(breadcrumbs); } else { event.getBreadcrumbs().addAll(breadcrumbs); } @@ -326,7 +323,7 @@ private void setBreadcrumbs(final @NotNull SentryBaseEvent event) { private void setScopeTags(final @NotNull SentryBaseEvent event) { final Map tags = (Map) - PersistingScopeObserver.read(options, PersistingScopeObserver.TAGS_FILENAME, Map.class); + readFromDisk(options, PersistingScopeObserver.TAGS_FILENAME, Map.class); if (tags == null) { return; } @@ -343,19 +340,29 @@ private void setScopeTags(final @NotNull SentryBaseEvent event) { private void setUser(final @NotNull SentryBaseEvent event) { if (event.getUser() == null) { - final User user = PersistingScopeObserver.read(options, USER_FILENAME, User.class); + final User user = readFromDisk(options, USER_FILENAME, User.class); event.setUser(user); } } private void setRequest(final @NotNull SentryBaseEvent event) { if (event.getRequest() == null) { - final Request request = - PersistingScopeObserver.read(options, REQUEST_FILENAME, Request.class); + final Request request = readFromDisk(options, REQUEST_FILENAME, Request.class); event.setRequest(request); } } + private @Nullable T readFromDisk( + final @NotNull SentryOptions options, + final @NotNull String fileName, + final @NotNull Class clazz) { + if (persistingScopeObserver == null) { + return null; + } + + return persistingScopeObserver.read(options, fileName, clazz); + } + // endregion // region options persisted values diff --git a/sentry-android-core/src/test/java/io/sentry/android/core/AndroidOptionsInitializerTest.kt b/sentry-android-core/src/test/java/io/sentry/android/core/AndroidOptionsInitializerTest.kt index f234f7a6402..31d1547ca49 100644 --- a/sentry-android-core/src/test/java/io/sentry/android/core/AndroidOptionsInitializerTest.kt +++ b/sentry-android-core/src/test/java/io/sentry/android/core/AndroidOptionsInitializerTest.kt @@ -20,6 +20,7 @@ import io.sentry.android.timber.SentryTimberIntegration import io.sentry.cache.PersistingOptionsObserver import io.sentry.cache.PersistingScopeObserver import io.sentry.compose.gestures.ComposeGestureTargetLocator +import io.sentry.test.ImmediateExecutorService import org.junit.runner.RunWith import org.mockito.kotlin.any import org.mockito.kotlin.eq @@ -55,6 +56,7 @@ class AndroidOptionsInitializerTest { configureContext: Context.() -> Unit = {}, assets: AssetManager? 
= null ) { + sentryOptions.executorService = ImmediateExecutorService() mockContext = if (metadata != null) { ContextUtilsTestHelper.mockMetaData( mockContext = ContextUtilsTestHelper.createMockContext(hasAppContext), @@ -686,9 +688,10 @@ class AndroidOptionsInitializerTest { } @Test - fun `PersistingScopeObserver is not set to options, if scope persistence is disabled`() { + fun `PersistingScopeObserver is no-op, if scope persistence is disabled`() { fixture.initSut(configureOptions = { isEnableScopePersistence = false }) - assertTrue { fixture.sentryOptions.scopeObservers.none { it is PersistingScopeObserver } } + fixture.sentryOptions.findPersistingScopeObserver()?.setTags(mapOf("key" to "value")) + assertFalse(File(AndroidOptionsInitializer.getCacheDir(fixture.context), PersistingScopeObserver.SCOPE_CACHE).exists()) } } diff --git a/sentry-android-core/src/test/java/io/sentry/android/core/AnrV2EventProcessorTest.kt b/sentry-android-core/src/test/java/io/sentry/android/core/AnrV2EventProcessorTest.kt index 6065e81e086..6d2d005eaab 100644 --- a/sentry-android-core/src/test/java/io/sentry/android/core/AnrV2EventProcessorTest.kt +++ b/sentry-android-core/src/test/java/io/sentry/android/core/AnrV2EventProcessorTest.kt @@ -35,6 +35,7 @@ import io.sentry.cache.PersistingScopeObserver.TAGS_FILENAME import io.sentry.cache.PersistingScopeObserver.TRACE_FILENAME import io.sentry.cache.PersistingScopeObserver.TRANSACTION_FILENAME import io.sentry.cache.PersistingScopeObserver.USER_FILENAME +import io.sentry.cache.tape.QueueFile import io.sentry.hints.AbnormalExit import io.sentry.hints.Backfillable import io.sentry.protocol.Browser @@ -61,6 +62,7 @@ import org.robolectric.annotation.Config import org.robolectric.shadow.api.Shadow import org.robolectric.shadows.ShadowActivityManager import org.robolectric.shadows.ShadowBuild +import java.io.ByteArrayOutputStream import java.io.File import kotlin.test.BeforeTest import kotlin.test.Test @@ -98,6 +100,7 @@ class AnrV2EventProcessorTest { options.cacheDirPath = dir.newFolder().absolutePath options.environment = "release" options.isSendDefaultPii = isSendDefaultPii + options.addScopeObserver(PersistingScopeObserver(options)) whenever(buildInfo.sdkInfoVersion).thenReturn(currentSdk) whenever(buildInfo.isEmulator).thenReturn(true) @@ -147,7 +150,16 @@ class AnrV2EventProcessorTest { fun persistScope(filename: String, entity: T) { val dir = File(options.cacheDirPath, SCOPE_CACHE).also { it.mkdirs() } val file = File(dir, filename) - options.serializer.serialize(entity, file.writer()) + if (filename == BREADCRUMBS_FILENAME) { + val queueFile = QueueFile.Builder(file).build() + (entity as List).forEach { crumb -> + val baos = ByteArrayOutputStream() + options.serializer.serialize(crumb, baos.writer()) + queueFile.add(baos.toByteArray()) + } + } else { + options.serializer.serialize(entity, file.writer()) + } } fun persistOptions(filename: String, entity: T) { @@ -621,7 +633,7 @@ class AnrV2EventProcessorTest { val processed = processor.process(SentryEvent(), hint)!! 
assertEquals(replayId1.toString(), processed.contexts[Contexts.REPLAY_ID].toString()) - assertEquals(replayId1.toString(), PersistingScopeObserver.read(fixture.options, REPLAY_FILENAME, String::class.java)) + assertEquals(replayId1.toString(), fixture.options.findPersistingScopeObserver()?.read(fixture.options, REPLAY_FILENAME, String::class.java)) } private fun processEvent( diff --git a/sentry-android-core/src/test/java/io/sentry/android/core/SentryAndroidTest.kt b/sentry-android-core/src/test/java/io/sentry/android/core/SentryAndroidTest.kt index a4608582f78..9645bb0b2d5 100644 --- a/sentry-android-core/src/test/java/io/sentry/android/core/SentryAndroidTest.kt +++ b/sentry-android-core/src/test/java/io/sentry/android/core/SentryAndroidTest.kt @@ -11,6 +11,7 @@ import android.os.SystemClock import androidx.test.core.app.ApplicationProvider import androidx.test.ext.junit.runners.AndroidJUnit4 import io.sentry.Breadcrumb +import io.sentry.DateUtils import io.sentry.Hint import io.sentry.ILogger import io.sentry.ISentryClient @@ -36,10 +37,13 @@ import io.sentry.cache.PersistingOptionsObserver import io.sentry.cache.PersistingOptionsObserver.ENVIRONMENT_FILENAME import io.sentry.cache.PersistingOptionsObserver.OPTIONS_CACHE import io.sentry.cache.PersistingOptionsObserver.RELEASE_FILENAME -import io.sentry.cache.PersistingScopeObserver import io.sentry.cache.PersistingScopeObserver.BREADCRUMBS_FILENAME +import io.sentry.cache.PersistingScopeObserver.REPLAY_FILENAME import io.sentry.cache.PersistingScopeObserver.SCOPE_CACHE import io.sentry.cache.PersistingScopeObserver.TRANSACTION_FILENAME +import io.sentry.cache.tape.QueueFile +import io.sentry.protocol.Contexts +import io.sentry.protocol.SentryId import io.sentry.transport.NoOpEnvelopeCache import io.sentry.util.StringUtils import org.awaitility.kotlin.await @@ -61,6 +65,7 @@ import org.robolectric.annotation.Config import org.robolectric.shadow.api.Shadow import org.robolectric.shadows.ShadowActivityManager import org.robolectric.shadows.ShadowActivityManager.ApplicationExitInfoBuilder +import java.io.ByteArrayOutputStream import java.io.File import java.nio.file.Files import java.util.concurrent.TimeUnit @@ -413,27 +418,31 @@ class SentryAndroidTest { assertEquals("Debug!", event.breadcrumbs!![0].message) assertEquals("staging", event.environment) assertEquals("io.sentry.sample@2.0.0", event.release) + assertEquals("afcb46b1140ade5187c4bbb5daa804df", event.contexts[Contexts.REPLAY_ID]) asserted.set(true) null } // have to do it after the cacheDir is set to options, because it adds a dsn hash after prefillOptionsCache(it.cacheDirPath!!) - prefillScopeCache(it.cacheDirPath!!) + prefillScopeCache(it, it.cacheDirPath!!) it.release = "io.sentry.sample@1.1.0+220" it.environment = "debug" - // this is necessary to delay the AnrV2Integration processing to execute the configure - // scope block below (otherwise it won't be possible as hub is no-op before .init) - it.executorService.submit { - Sentry.configureScope { scope -> - // make sure the scope values changed to test that we're still using previously - // persisted values for the old ANR events - assertEquals("TestActivity", scope.transactionName) - } - } options = it } + options.executorService.submit { + // verify we reset the persisted scope values after the init bg tasks have run to ensure + // clean state for a new process. 
+ assertEquals( + emptyList(), + options.findPersistingScopeObserver()?.read(options, BREADCRUMBS_FILENAME, List::class.java) + ) + assertEquals( + SentryId.EMPTY_ID.toString(), + options.findPersistingScopeObserver()?.read(options, REPLAY_FILENAME, String::class.java) + ) + } Sentry.configureScope { it.setTransaction("TestActivity") it.addBreadcrumb(Breadcrumb.error("Error!")) @@ -447,7 +456,7 @@ class SentryAndroidTest { // assert that persisted values have changed assertEquals( "TestActivity", - PersistingScopeObserver.read(options, TRANSACTION_FILENAME, String::class.java) + options.findPersistingScopeObserver()?.read(options, TRANSACTION_FILENAME, String::class.java) ) assertEquals( "io.sentry.sample@1.1.0+220", @@ -528,19 +537,22 @@ class SentryAndroidTest { assertTrue(optionsRef.eventProcessors.any { it is AnrV2EventProcessor }) } - private fun prefillScopeCache(cacheDir: String) { + private fun prefillScopeCache(options: SentryOptions, cacheDir: String) { val scopeDir = File(cacheDir, SCOPE_CACHE).also { it.mkdirs() } - File(scopeDir, BREADCRUMBS_FILENAME).writeText( - """ - [{ - "timestamp": "2009-11-16T01:08:47.000Z", - "message": "Debug!", - "type": "debug", - "level": "debug" - }] - """.trimIndent() + val queueFile = QueueFile.Builder(File(scopeDir, BREADCRUMBS_FILENAME)).build() + val baos = ByteArrayOutputStream() + options.serializer.serialize( + Breadcrumb(DateUtils.getDateTime("2009-11-16T01:08:47.000Z")).apply { + message = "Debug!" + type = "debug" + level = DEBUG + }, + baos.writer() ) + queueFile.add(baos.toByteArray()) File(scopeDir, TRANSACTION_FILENAME).writeText("\"MainActivity\"") + File(scopeDir, REPLAY_FILENAME).writeText("\"afcb46b1140ade5187c4bbb5daa804df\"") + File(options.getCacheDirPath(), "replay_afcb46b1140ade5187c4bbb5daa804df").mkdirs() } private fun prefillOptionsCache(cacheDir: String) { diff --git a/sentry-android-replay/src/main/java/io/sentry/android/replay/ReplayIntegration.kt b/sentry-android-replay/src/main/java/io/sentry/android/replay/ReplayIntegration.kt index 655b3ca354b..8c572c003c1 100644 --- a/sentry-android-replay/src/main/java/io/sentry/android/replay/ReplayIntegration.kt +++ b/sentry-android-replay/src/main/java/io/sentry/android/replay/ReplayIntegration.kt @@ -37,7 +37,6 @@ import io.sentry.android.replay.util.appContext import io.sentry.android.replay.util.gracefullyShutdown import io.sentry.android.replay.util.sample import io.sentry.android.replay.util.submitSafely -import io.sentry.cache.PersistingScopeObserver import io.sentry.cache.PersistingScopeObserver.BREADCRUMBS_FILENAME import io.sentry.cache.PersistingScopeObserver.REPLAY_FILENAME import io.sentry.hints.Backfillable @@ -412,7 +411,8 @@ public class ReplayIntegration( // TODO: previous run and set them directly to the ReplayEvent so they don't get overwritten in MainEventProcessor options.executorService.submitSafely(options, "ReplayIntegration.finalize_previous_replay") { - val previousReplayIdString = PersistingScopeObserver.read(options, REPLAY_FILENAME, String::class.java) ?: run { + val persistingScopeObserver = options.findPersistingScopeObserver() + val previousReplayIdString = persistingScopeObserver?.read(options, REPLAY_FILENAME, String::class.java) ?: run { cleanupReplays() return@submitSafely } @@ -425,7 +425,9 @@ public class ReplayIntegration( cleanupReplays() return@submitSafely } - val breadcrumbs = PersistingScopeObserver.read(options, BREADCRUMBS_FILENAME, List::class.java, Breadcrumb.Deserializer()) as? 
List + + @Suppress("UNCHECKED_CAST") + val breadcrumbs = persistingScopeObserver.read(options, BREADCRUMBS_FILENAME, List::class.java) as? List val segment = CaptureStrategy.createSegment( hub = hub, options = options, diff --git a/sentry-android-replay/src/test/java/io/sentry/android/replay/ReplayIntegrationTest.kt b/sentry-android-replay/src/test/java/io/sentry/android/replay/ReplayIntegrationTest.kt index 353b11d8f66..93632d2df7d 100644 --- a/sentry-android-replay/src/test/java/io/sentry/android/replay/ReplayIntegrationTest.kt +++ b/sentry-android-replay/src/test/java/io/sentry/android/replay/ReplayIntegrationTest.kt @@ -32,6 +32,7 @@ import io.sentry.android.replay.capture.SessionCaptureStrategy import io.sentry.android.replay.capture.SessionCaptureStrategyTest.Fixture.Companion.VIDEO_DURATION import io.sentry.android.replay.gestures.GestureRecorder import io.sentry.cache.PersistingScopeObserver +import io.sentry.cache.tape.QueueFile import io.sentry.protocol.SentryException import io.sentry.protocol.SentryId import io.sentry.rrweb.RRWebBreadcrumbEvent @@ -59,6 +60,7 @@ import org.mockito.kotlin.times import org.mockito.kotlin.verify import org.mockito.kotlin.whenever import org.robolectric.annotation.Config +import java.io.ByteArrayOutputStream import java.io.File import kotlin.test.BeforeTest import kotlin.test.Test @@ -456,6 +458,7 @@ class ReplayIntegrationTest { val oldReplayId = SentryId() fixture.options.cacheDirPath = tmpDir.newFolder().absolutePath + fixture.options.addScopeObserver(PersistingScopeObserver(fixture.options)) val oldReplay = File(fixture.options.cacheDirPath, "replay_$oldReplayId").also { it.mkdirs() } val screenshot = File(oldReplay, "1720693523997.jpg").also { it.createNewFile() } @@ -472,17 +475,18 @@ class ReplayIntegrationTest { it.writeText("\"$oldReplayId\"") } val breadcrumbsFile = File(scopeCache, PersistingScopeObserver.BREADCRUMBS_FILENAME) + val queueFile = QueueFile.Builder(breadcrumbsFile).build() + val baos = ByteArrayOutputStream() fixture.options.serializer.serialize( - listOf( - Breadcrumb(DateUtils.getDateTime("2024-07-11T10:25:23.454Z")).apply { - category = "navigation" - type = "navigation" - setData("from", "from") - setData("to", "to") - } - ), - breadcrumbsFile.writer() + Breadcrumb(DateUtils.getDateTime("2024-07-11T10:25:23.454Z")).apply { + category = "navigation" + type = "navigation" + setData("from", "from") + setData("to", "to") + }, + baos.writer() ) + queueFile.add(baos.toByteArray()) File(oldReplay, ONGOING_SEGMENT).also { it.writeText( """ diff --git a/sentry/api/sentry.api b/sentry/api/sentry.api index b7cb1adfc7e..d26d3474911 100644 --- a/sentry/api/sentry.api +++ b/sentry/api/sentry.api @@ -2365,6 +2365,7 @@ public class io/sentry/SentryOptions { public fun addScopeObserver (Lio/sentry/IScopeObserver;)V public fun addTracingOrigin (Ljava/lang/String;)V public static fun empty ()Lio/sentry/SentryOptions; + public fun findPersistingScopeObserver ()Lio/sentry/cache/PersistingScopeObserver; public fun getBackpressureMonitor ()Lio/sentry/backpressure/IBackpressureMonitor; public fun getBeforeBreadcrumb ()Lio/sentry/SentryOptions$BeforeBreadcrumbCallback; public fun getBeforeEmitMetricCallback ()Lio/sentry/SentryOptions$BeforeEmitMetricCallback; @@ -3395,8 +3396,9 @@ public final class io/sentry/cache/PersistingScopeObserver : io/sentry/ScopeObse public static final field TRANSACTION_FILENAME Ljava/lang/String; public static final field USER_FILENAME Ljava/lang/String; public fun (Lio/sentry/SentryOptions;)V - public static fun 
read (Lio/sentry/SentryOptions;Ljava/lang/String;Ljava/lang/Class;)Ljava/lang/Object; - public static fun read (Lio/sentry/SentryOptions;Ljava/lang/String;Ljava/lang/Class;Lio/sentry/JsonDeserializer;)Ljava/lang/Object; + public fun addBreadcrumb (Lio/sentry/Breadcrumb;)V + public fun read (Lio/sentry/SentryOptions;Ljava/lang/String;Ljava/lang/Class;)Ljava/lang/Object; + public fun resetCache ()V public fun setBreadcrumbs (Ljava/util/Collection;)V public fun setContexts (Lio/sentry/protocol/Contexts;)V public fun setExtras (Ljava/util/Map;)V @@ -3411,6 +3413,50 @@ public final class io/sentry/cache/PersistingScopeObserver : io/sentry/ScopeObse public static fun store (Lio/sentry/SentryOptions;Ljava/lang/Object;Ljava/lang/String;)V } +public abstract class io/sentry/cache/tape/ObjectQueue : java/io/Closeable, java/lang/Iterable { + public fun ()V + public abstract fun add (Ljava/lang/Object;)V + public fun asList ()Ljava/util/List; + public fun clear ()V + public static fun create (Lio/sentry/cache/tape/QueueFile;Lio/sentry/cache/tape/ObjectQueue$Converter;)Lio/sentry/cache/tape/ObjectQueue; + public static fun createEmpty ()Lio/sentry/cache/tape/ObjectQueue; + public abstract fun file ()Lio/sentry/cache/tape/QueueFile; + public fun isEmpty ()Z + public abstract fun peek ()Ljava/lang/Object; + public fun peek (I)Ljava/util/List; + public fun remove ()V + public abstract fun remove (I)V + public abstract fun size ()I +} + +public abstract interface class io/sentry/cache/tape/ObjectQueue$Converter { + public abstract fun from ([B)Ljava/lang/Object; + public abstract fun toStream (Ljava/lang/Object;Ljava/io/OutputStream;)V +} + +public final class io/sentry/cache/tape/QueueFile : java/io/Closeable, java/lang/Iterable { + public fun add ([B)V + public fun add ([BII)V + public fun clear ()V + public fun close ()V + public fun file ()Ljava/io/File; + public fun isAtFullCapacity ()Z + public fun isEmpty ()Z + public fun iterator ()Ljava/util/Iterator; + public fun peek ()[B + public fun remove ()V + public fun remove (I)V + public fun size ()I + public fun toString ()Ljava/lang/String; +} + +public final class io/sentry/cache/tape/QueueFile$Builder { + public fun (Ljava/io/File;)V + public fun build ()Lio/sentry/cache/tape/QueueFile; + public fun size (I)Lio/sentry/cache/tape/QueueFile$Builder; + public fun zero (Z)Lio/sentry/cache/tape/QueueFile$Builder; +} + public final class io/sentry/clientreport/ClientReport : io/sentry/JsonSerializable, io/sentry/JsonUnknown { public fun (Ljava/util/Date;Ljava/util/List;)V public fun getDiscardedEvents ()Ljava/util/List; diff --git a/sentry/build.gradle.kts b/sentry/build.gradle.kts index 08efc550d5a..726b6f2f2f7 100644 --- a/sentry/build.gradle.kts +++ b/sentry/build.gradle.kts @@ -33,6 +33,7 @@ dependencies { testImplementation(Config.TestLibs.awaitility) testImplementation(Config.TestLibs.javaFaker) testImplementation(Config.TestLibs.msgpack) + testImplementation(Config.TestLibs.okio) testImplementation(projects.sentryTestSupport) } diff --git a/sentry/src/main/java/io/sentry/Sentry.java b/sentry/src/main/java/io/sentry/Sentry.java index 4d363dc2612..49abb6b24cd 100644 --- a/sentry/src/main/java/io/sentry/Sentry.java +++ b/sentry/src/main/java/io/sentry/Sentry.java @@ -3,6 +3,7 @@ import io.sentry.backpressure.BackpressureMonitor; import io.sentry.cache.EnvelopeCache; import io.sentry.cache.IEnvelopeCache; +import io.sentry.cache.PersistingScopeObserver; import io.sentry.config.PropertiesProviderFactory; import 
io.sentry.internal.debugmeta.NoOpDebugMetaLoader; import io.sentry.internal.debugmeta.ResourcesDebugMetaLoader; @@ -375,6 +376,16 @@ private static void notifyOptionsObservers(final @NotNull SentryOptions options) observer.setReplayErrorSampleRate( options.getSessionReplay().getOnErrorSampleRate()); } + + // since it's a new SDK init we clean up persisted scope values before serializing + // new ones, so they are not making it to the new events if they were e.g. disabled + // (e.g. replayId) or are simply irrelevant (e.g. breadcrumbs). NOTE: this happens + // after the integrations relying on those values are done with processing them. + final @Nullable PersistingScopeObserver scopeCache = + options.findPersistingScopeObserver(); + if (scopeCache != null) { + scopeCache.resetCache(); + } }); } catch (Throwable e) { options.getLogger().log(SentryLevel.DEBUG, "Failed to notify options observers.", e); diff --git a/sentry/src/main/java/io/sentry/SentryOptions.java b/sentry/src/main/java/io/sentry/SentryOptions.java index 8e528ba5089..b238b057844 100644 --- a/sentry/src/main/java/io/sentry/SentryOptions.java +++ b/sentry/src/main/java/io/sentry/SentryOptions.java @@ -4,6 +4,7 @@ import io.sentry.backpressure.IBackpressureMonitor; import io.sentry.backpressure.NoOpBackpressureMonitor; import io.sentry.cache.IEnvelopeCache; +import io.sentry.cache.PersistingScopeObserver; import io.sentry.clientreport.ClientReportRecorder; import io.sentry.clientreport.IClientReportRecorder; import io.sentry.clientreport.NoOpClientReportRecorder; @@ -1460,6 +1461,17 @@ public List getScopeObservers() { return observers; } + @ApiStatus.Internal + @Nullable + public PersistingScopeObserver findPersistingScopeObserver() { + for (final @NotNull IScopeObserver observer : observers) { + if (observer instanceof PersistingScopeObserver) { + return (PersistingScopeObserver) observer; + } + } + return null; + } + /** * Adds a SentryOptions observer * diff --git a/sentry/src/main/java/io/sentry/cache/CacheUtils.java b/sentry/src/main/java/io/sentry/cache/CacheUtils.java index 1eb5f7e19f4..eb9732a3439 100644 --- a/sentry/src/main/java/io/sentry/cache/CacheUtils.java +++ b/sentry/src/main/java/io/sentry/cache/CacheUtils.java @@ -38,13 +38,6 @@ static void store( } final File file = new File(cacheDir, fileName); - if (file.exists()) { - options.getLogger().log(DEBUG, "Overwriting %s in scope cache", fileName); - if (!file.delete()) { - options.getLogger().log(SentryLevel.ERROR, "Failed to delete: %s", file.getAbsolutePath()); - } - } - try (final OutputStream outputStream = new FileOutputStream(file); final Writer writer = new BufferedWriter(new OutputStreamWriter(outputStream, UTF_8))) { options.getSerializer().serialize(entity, writer); @@ -64,11 +57,9 @@ static void delete( } final File file = new File(cacheDir, fileName); - if (file.exists()) { - options.getLogger().log(DEBUG, "Deleting %s from scope cache", fileName); - if (!file.delete()) { - options.getLogger().log(SentryLevel.ERROR, "Failed to delete: %s", file.getAbsolutePath()); - } + options.getLogger().log(DEBUG, "Deleting %s from scope cache", fileName); + if (!file.delete()) { + options.getLogger().log(SentryLevel.ERROR, "Failed to delete: %s", file.getAbsolutePath()); } } @@ -102,7 +93,7 @@ static void delete( return null; } - private static @Nullable File ensureCacheDir( + static @Nullable File ensureCacheDir( final @NotNull SentryOptions options, final @NotNull String cacheDirName) { final String cacheDir = options.getCacheDirPath(); if (cacheDir == null) { 
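For reference, a minimal sketch (not part of the patch) of how the new instance-based lookup replaces the removed static PersistingScopeObserver.read(...) calls. It assumes only the APIs introduced above (SentryOptions.findPersistingScopeObserver() and the instance-level read(options, fileName, clazz)); the helper class and method names are illustrative:

import io.sentry.SentryOptions;
import io.sentry.cache.PersistingScopeObserver;
import org.jetbrains.annotations.Nullable;

// Illustrative helper: resolves the PersistingScopeObserver registered on the options
// (added by AndroidOptionsInitializer when a cache dir is set) and reads a persisted value.
final class PersistedScopeReader {

  // Returns the persisted transaction name, or null when scope persistence is not available.
  static @Nullable String readTransaction(final SentryOptions options) {
    final @Nullable PersistingScopeObserver observer = options.findPersistingScopeObserver();
    if (observer == null) {
      return null; // no observer registered, e.g. no cacheDirPath configured
    }
    return observer.read(options, PersistingScopeObserver.TRANSACTION_FILENAME, String.class);
  }
}

Callers that previously used the static PersistingScopeObserver.read(options, fileName, clazz) overloads (removed in the sentry/api/sentry.api diff above) follow this pattern throughout the patch, e.g. AnrV2EventProcessor.readFromDisk and ReplayIntegration.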
diff --git a/sentry/src/main/java/io/sentry/cache/PersistingScopeObserver.java b/sentry/src/main/java/io/sentry/cache/PersistingScopeObserver.java index 908e2c66e41..c9356579c9c 100644 --- a/sentry/src/main/java/io/sentry/cache/PersistingScopeObserver.java +++ b/sentry/src/main/java/io/sentry/cache/PersistingScopeObserver.java @@ -1,18 +1,33 @@ package io.sentry.cache; import static io.sentry.SentryLevel.ERROR; +import static io.sentry.SentryLevel.INFO; +import static io.sentry.cache.CacheUtils.ensureCacheDir; import io.sentry.Breadcrumb; import io.sentry.IScope; -import io.sentry.JsonDeserializer; import io.sentry.ScopeObserverAdapter; import io.sentry.SentryLevel; import io.sentry.SentryOptions; import io.sentry.SpanContext; +import io.sentry.cache.tape.ObjectQueue; +import io.sentry.cache.tape.QueueFile; import io.sentry.protocol.Contexts; import io.sentry.protocol.Request; import io.sentry.protocol.SentryId; import io.sentry.protocol.User; +import io.sentry.util.LazyEvaluator; +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Reader; +import java.io.Writer; +import java.nio.charset.Charset; import java.util.Collection; import java.util.Map; import org.jetbrains.annotations.NotNull; @@ -20,6 +35,8 @@ public final class PersistingScopeObserver extends ScopeObserverAdapter { + private static final Charset UTF_8 = Charset.forName("UTF-8"); + public static final String SCOPE_CACHE = ".scope-cache"; public static final String USER_FILENAME = "user.json"; public static final String BREADCRUMBS_FILENAME = "breadcrumbs.json"; @@ -33,7 +50,60 @@ public final class PersistingScopeObserver extends ScopeObserverAdapter { public static final String TRACE_FILENAME = "trace.json"; public static final String REPLAY_FILENAME = "replay.json"; - private final @NotNull SentryOptions options; + private @NotNull SentryOptions options; + private final @NotNull LazyEvaluator> breadcrumbsQueue = + new LazyEvaluator<>( + () -> { + final File cacheDir = ensureCacheDir(options, SCOPE_CACHE); + if (cacheDir == null) { + options.getLogger().log(INFO, "Cache dir is not set, cannot store in scope cache"); + return ObjectQueue.createEmpty(); + } + + QueueFile queueFile = null; + final File file = new File(cacheDir, BREADCRUMBS_FILENAME); + try { + try { + queueFile = new QueueFile.Builder(file).size(options.getMaxBreadcrumbs()).build(); + } catch (IOException e) { + // if file is corrupted we simply delete it and try to create it again. 
We accept + // the trade + // off of losing breadcrumbs for ANRs that happened right before the app has + // received an + // update where the new format was introduced + file.delete(); + + queueFile = new QueueFile.Builder(file).size(options.getMaxBreadcrumbs()).build(); + } + } catch (IOException e) { + options.getLogger().log(ERROR, "Failed to create breadcrumbs queue", e); + return ObjectQueue.createEmpty(); + } + return ObjectQueue.create( + queueFile, + new ObjectQueue.Converter() { + @Override + @Nullable + public Breadcrumb from(byte[] source) { + try (final Reader reader = + new BufferedReader( + new InputStreamReader(new ByteArrayInputStream(source), UTF_8))) { + return options.getSerializer().deserialize(reader, Breadcrumb.class); + } catch (Throwable e) { + options.getLogger().log(ERROR, e, "Error reading entity from scope cache"); + } + return null; + } + + @Override + public void toStream(Breadcrumb value, OutputStream sink) throws IOException { + try (final Writer writer = + new BufferedWriter(new OutputStreamWriter(sink, UTF_8))) { + options.getSerializer().serialize(value, writer); + } + } + }); + }); public PersistingScopeObserver(final @NotNull SentryOptions options) { this.options = options; @@ -51,9 +121,32 @@ public void setUser(final @Nullable User user) { }); } + @Override + public void addBreadcrumb(@NotNull Breadcrumb crumb) { + serializeToDisk( + () -> { + try { + breadcrumbsQueue.getValue().add(crumb); + } catch (IOException e) { + options.getLogger().log(ERROR, "Failed to add breadcrumb to file queue", e); + } + }); + } + @Override public void setBreadcrumbs(@NotNull Collection breadcrumbs) { - serializeToDisk(() -> store(breadcrumbs, BREADCRUMBS_FILENAME)); + if (breadcrumbs.isEmpty()) { + // we only clear the queue if the new collection is empty (someone called clearBreadcrumbs) + // If it's not empty, we'd add breadcrumbs one-by-one in the method above + serializeToDisk( + () -> { + try { + breadcrumbsQueue.getValue().clear(); + } catch (IOException e) { + options.getLogger().log(ERROR, "Failed to clear breadcrumbs from file queue", e); + } + }); + } } @Override @@ -133,9 +226,16 @@ public void setReplayId(@NotNull SentryId replayId) { @SuppressWarnings("FutureReturnValueIgnored") private void serializeToDisk(final @NotNull Runnable task) { + if (!options.isEnableScopePersistence()) { + return; + } if (Thread.currentThread().getName().contains("SentryExecutor")) { // we're already on the sentry executor thread, so we can just execute it directly - task.run(); + try { + task.run(); + } catch (Throwable e) { + options.getLogger().log(ERROR, "Serialization task failed", e); + } return; } @@ -170,18 +270,42 @@ public static void store( CacheUtils.store(options, entity, SCOPE_CACHE, fileName); } - public static @Nullable T read( + public @Nullable T read( final @NotNull SentryOptions options, final @NotNull String fileName, final @NotNull Class clazz) { - return read(options, fileName, clazz, null); + if (fileName.equals(BREADCRUMBS_FILENAME)) { + try { + return clazz.cast(breadcrumbsQueue.getValue().asList()); + } catch (IOException e) { + options.getLogger().log(ERROR, "Unable to read serialized breadcrumbs from QueueFile"); + return null; + } + } + return CacheUtils.read(options, SCOPE_CACHE, fileName, clazz, null); } - public static @Nullable T read( - final @NotNull SentryOptions options, - final @NotNull String fileName, - final @NotNull Class clazz, - final @Nullable JsonDeserializer elementDeserializer) { - return CacheUtils.read(options, SCOPE_CACHE, 
fileName, clazz, elementDeserializer); + /** + * Resets the scope cache by deleting the files and/or clearing the QueueFiles. Note: this does + * I/O and should be called from a background thread. + */ + public void resetCache() { + // since it keeps a reference to the file and we cannot delete it, breadcrumbs we just clear + try { + breadcrumbsQueue.getValue().clear(); + } catch (IOException e) { + options.getLogger().log(ERROR, "Failed to clear breadcrumbs from file queue", e); + } + + // the rest we can safely delete + delete(USER_FILENAME); + delete(LEVEL_FILENAME); + delete(REQUEST_FILENAME); + delete(FINGERPRINT_FILENAME); + delete(CONTEXTS_FILENAME); + delete(EXTRAS_FILENAME); + delete(TAGS_FILENAME); + delete(TRACE_FILENAME); + delete(TRANSACTION_FILENAME); } } diff --git a/sentry/src/main/java/io/sentry/cache/tape/EmptyObjectQueue.java b/sentry/src/main/java/io/sentry/cache/tape/EmptyObjectQueue.java new file mode 100644 index 00000000000..2aa41c9b791 --- /dev/null +++ b/sentry/src/main/java/io/sentry/cache/tape/EmptyObjectQueue.java @@ -0,0 +1,52 @@ +package io.sentry.cache.tape; + +import java.io.IOException; +import java.util.Iterator; +import java.util.NoSuchElementException; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +final class EmptyObjectQueue extends ObjectQueue { + @Override + public @Nullable QueueFile file() { + return null; + } + + @Override + public int size() { + return 0; + } + + @Override + public void add(T entry) throws IOException {} + + @Override + public @Nullable T peek() throws IOException { + return null; + } + + @Override + public void remove(int n) throws IOException {} + + @Override + public void close() throws IOException {} + + @NotNull + @Override + public Iterator iterator() { + return new EmptyIterator<>(); + } + + private static final class EmptyIterator implements Iterator { + + @Override + public boolean hasNext() { + return false; + } + + @Override + public T next() { + throw new NoSuchElementException("No elements in EmptyIterator!"); + } + } +} diff --git a/sentry/src/main/java/io/sentry/cache/tape/FileObjectQueue.java b/sentry/src/main/java/io/sentry/cache/tape/FileObjectQueue.java new file mode 100644 index 00000000000..8ed9cef56e1 --- /dev/null +++ b/sentry/src/main/java/io/sentry/cache/tape/FileObjectQueue.java @@ -0,0 +1,148 @@ +/* + * Adapted from: https://github.com/square/tape/tree/445cd3fd0a7b3ec48c9ea3e0e86663fe6d3735d8/tape/src/main/java/com/squareup/tape2 + * + * Copyright (C) 2010 Square, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.sentry.cache.tape; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Iterator; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +final class FileObjectQueue extends ObjectQueue { + /** Backing storage implementation. */ + private final QueueFile queueFile; + /** Reusable byte output buffer. 
*/ + private final DirectByteArrayOutputStream bytes = new DirectByteArrayOutputStream(); + + final Converter converter; + + FileObjectQueue(QueueFile queueFile, Converter converter) { + this.queueFile = queueFile; + this.converter = converter; + } + + @Override + public @NotNull QueueFile file() { + return queueFile; + } + + @Override + public int size() { + return queueFile.size(); + } + + @Override + public boolean isEmpty() { + return queueFile.isEmpty(); + } + + @Override + public void add(T entry) throws IOException { + bytes.reset(); + converter.toStream(entry, bytes); + queueFile.add(bytes.getArray(), 0, bytes.size()); + } + + @Override + public @Nullable T peek() throws IOException { + byte[] bytes = queueFile.peek(); + if (bytes == null) return null; + return converter.from(bytes); + } + + @Override + public void remove() throws IOException { + queueFile.remove(); + } + + @Override + public void remove(int n) throws IOException { + queueFile.remove(n); + } + + @Override + public void clear() throws IOException { + queueFile.clear(); + } + + @Override + public void close() throws IOException { + queueFile.close(); + } + + /** + * Returns an iterator over entries in this queue. + * + *
<p>The iterator disallows modifications to the queue during iteration. Removing entries from + * the head of the queue is permitted during iteration using {@link Iterator#remove()}. + * + *
The iterator may throw an unchecked {@link IOException} during {@link Iterator#next()} or + * {@link Iterator#remove()}. + */ + @Override + public Iterator iterator() { + return new QueueFileIterator(queueFile.iterator()); + } + + @Override + public String toString() { + return "FileObjectQueue{" + "queueFile=" + queueFile + '}'; + } + + private final class QueueFileIterator implements Iterator { + final Iterator iterator; + + QueueFileIterator(Iterator iterator) { + this.iterator = iterator; + } + + @Override + public boolean hasNext() { + return iterator.hasNext(); + } + + @Override + @Nullable + public T next() { + byte[] data = iterator.next(); + try { + return converter.from(data); + } catch (IOException e) { + throw QueueFile.getSneakyThrowable(e); + } + } + + @Override + public void remove() { + iterator.remove(); + } + } + + /** Enables direct access to the internal array. Avoids unnecessary copying. */ + private static final class DirectByteArrayOutputStream extends ByteArrayOutputStream { + DirectByteArrayOutputStream() {} + + /** + * Gets a reference to the internal byte array. The {@link #size()} method indicates how many + * bytes contain actual data added since the last {@link #reset()} call. + */ + byte[] getArray() { + return buf; + } + } +} diff --git a/sentry/src/main/java/io/sentry/cache/tape/ObjectQueue.java b/sentry/src/main/java/io/sentry/cache/tape/ObjectQueue.java new file mode 100644 index 00000000000..c92cad36218 --- /dev/null +++ b/sentry/src/main/java/io/sentry/cache/tape/ObjectQueue.java @@ -0,0 +1,108 @@ +/* + * Adapted from: https://github.com/square/tape/tree/445cd3fd0a7b3ec48c9ea3e0e86663fe6d3735d8/tape/src/main/java/com/squareup/tape2 + * + * Copyright (C) 2010 Square, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.sentry.cache.tape; + +import java.io.Closeable; +import java.io.IOException; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import org.jetbrains.annotations.ApiStatus; +import org.jetbrains.annotations.Nullable; + +/** A queue of objects. */ +@ApiStatus.Internal +public abstract class ObjectQueue implements Iterable, Closeable { + /** A queue for objects that are atomically and durably serialized to {@code file}. */ + public static ObjectQueue create(QueueFile qf, Converter converter) { + return new FileObjectQueue<>(qf, converter); + } + + /** An empty queue for objects that is essentially a no-op. */ + public static ObjectQueue createEmpty() { + return new EmptyObjectQueue<>(); + } + + /** The underlying {@link QueueFile} backing this queue, or null if it's only in memory. */ + public abstract @Nullable QueueFile file(); + + /** Returns the number of entries in the queue. */ + public abstract int size(); + + /** Returns {@code true} if this queue contains no entries. */ + public boolean isEmpty() { + return size() == 0; + } + + /** Enqueues an entry that can be processed at any time. 
*/ + public abstract void add(T entry) throws IOException; + + /** + * Returns the head of the queue, or {@code null} if the queue is empty. Does not modify the + * queue. + */ + public abstract @Nullable T peek() throws IOException; + + /** + * Reads up to {@code max} entries from the head of the queue without removing the entries. If the + * queue's {@link #size()} is less than {@code max} then only {@link #size()} entries are read. + */ + public List peek(int max) throws IOException { + int end = Math.min(max, size()); + List subList = new ArrayList(end); + Iterator iterator = iterator(); + for (int i = 0; i < end; i++) { + subList.add(iterator.next()); + } + return Collections.unmodifiableList(subList); + } + + /** Returns the entries in the queue as an unmodifiable {@link List}. */ + public List asList() throws IOException { + return peek(size()); + } + + /** Removes the head of the queue. */ + public void remove() throws IOException { + remove(1); + } + + /** Removes {@code n} entries from the head of the queue. */ + public abstract void remove(int n) throws IOException; + + /** Clears this queue. Also truncates the file to the initial size. */ + public void clear() throws IOException { + remove(size()); + } + + /** + * Convert a byte stream to and from a concrete type. + * + * @param Object type. + */ + public interface Converter { + /** Converts bytes to an object. */ + @Nullable + T from(byte[] source) throws IOException; + + /** Converts {@code value} to bytes written to the specified stream. */ + void toStream(T value, OutputStream sink) throws IOException; + } +} diff --git a/sentry/src/main/java/io/sentry/cache/tape/QueueFile.java b/sentry/src/main/java/io/sentry/cache/tape/QueueFile.java new file mode 100644 index 00000000000..bc2ed568267 --- /dev/null +++ b/sentry/src/main/java/io/sentry/cache/tape/QueueFile.java @@ -0,0 +1,817 @@ +/* + * Adapted from: https://github.com/square/tape/tree/445cd3fd0a7b3ec48c9ea3e0e86663fe6d3735d8/tape/src/main/java/com/squareup/tape2 + * + * Copyright (C) 2010 Square, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.sentry.cache.tape; + +import static java.lang.Math.min; + +import java.io.Closeable; +import java.io.EOFException; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.channels.FileChannel; +import java.util.ConcurrentModificationException; +import java.util.Iterator; +import java.util.NoSuchElementException; +import org.jetbrains.annotations.ApiStatus; +import org.jetbrains.annotations.Nullable; + +/** + * A reliable, efficient, file-based, FIFO queue. Additions and removals are O(1). All operations + * are atomic. Writes are synchronous; data will be written to disk before an operation returns. The + * underlying file is structured to survive process and even system crashes. If an I/O exception is + * thrown during a mutating change, the change is aborted. 
It is safe to continue to use a {@code + * QueueFile} instance after an exception. + * + *
<p>Note that this implementation is not synchronized. + * + *
<p>In a traditional queue, the remove operation returns an element. In this queue, {@link #peek} + * and {@link #remove} are used in conjunction. Use {@code peek} to retrieve the first element, and + * then {@code remove} to remove it after successful processing. If the system crashes after {@code + * peek} and during processing, the element will remain in the queue, to be processed when the + * system restarts. + * + *
<p>NOTE: The current implementation is built for file systems that support + * atomic segment writes (like YAFFS). Most conventional file systems don't support this; if the + * power goes out while writing a segment, the segment will contain garbage and the file will be + * corrupt. We'll add journaling support so this class can be used with more file systems later. + * + *
Construct instances with {@link Builder}. + * + * @author Bob Lee (bob@squareup.com) + */ +@ApiStatus.Internal +public final class QueueFile implements Closeable, Iterable { + /** Leading bit set to 1 indicating a versioned header and the version of 1. */ + private static final int VERSIONED_HEADER = 0x80000001; + + /** Initial file size in bytes. */ + static final int INITIAL_LENGTH = 4096; // one file system block + + /** A block of nothing to write over old data. */ + private static final byte[] ZEROES = new byte[INITIAL_LENGTH]; + + /** + * The underlying file. Uses a ring buffer to store entries. Designed so that a modification isn't + * committed or visible until we write the header. The header is much smaller than a segment. So + * long as the underlying file system supports atomic segment writes, changes to the queue are + * atomic. Storing the file length ensures we can recover from a failed expansion (i.e. if setting + * the file length succeeds but the process dies before the data can be copied). + * + *
<p>This implementation supports two versions of the on-disk format. + * + *
+   * Format:
+   *   16-32 bytes      Header
+   *   ...              Data
+   *
+   * Header (32 bytes):
+   *   1 bit            Versioned indicator [0 = legacy (see "Legacy Header"), 1 = versioned]
+   *   31 bits          Version, always 1
+   *   8 bytes          File length
+   *   4 bytes          Element count
+   *   8 bytes          Head element position
+   *   8 bytes          Tail element position
+   *
+   * Element:
+   *   4 bytes          Data length
+   *   ...              Data
+   * 
+ */ + RandomAccessFile raf; + + /** Keep file around for error reporting. */ + final File file; + + /** The header length in bytes: 16 or 32. */ + final int headerLength = 32; + + /** Cached file length. Always a power of 2. */ + long fileLength; + + /** Number of elements. */ + int elementCount; + + /** Pointer to first (or eldest) element. */ + Element first; + + /** Pointer to last (or newest) element. */ + private Element last; + + /** In-memory buffer. Big enough to hold the header. */ + private final byte[] buffer = new byte[32]; + + /** + * The number of times this file has been structurally modified — it is incremented during {@link + * #remove(int)} and {@link #add(byte[], int, int)}. Used by {@link ElementIterator} to guard + * against concurrent modification. + */ + int modCount = 0; + + /** When true, removing an element will also overwrite data with zero bytes. */ + private final boolean zero; + + /** A number of elements at which this queue will wrap around (ring buffer). */ + private final int maxElements; + + boolean closed; + + static RandomAccessFile initializeFromFile(File file) throws IOException { + if (!file.exists()) { + // Use a temp file so we don't leave a partially-initialized file. + File tempFile = new File(file.getPath() + ".tmp"); + RandomAccessFile raf = open(tempFile); + try { + raf.setLength(INITIAL_LENGTH); + raf.seek(0); + raf.writeInt(VERSIONED_HEADER); + raf.writeLong(INITIAL_LENGTH); + } finally { + raf.close(); + } + + // A rename is atomic. + if (!tempFile.renameTo(file)) { + throw new IOException("Rename failed!"); + } + } + + return open(file); + } + + /** Opens a random access file that writes synchronously. */ + private static RandomAccessFile open(File file) throws FileNotFoundException { + return new RandomAccessFile(file, "rwd"); + } + + QueueFile(File file, RandomAccessFile raf, boolean zero, int maxElements) throws IOException { + this.file = file; + this.raf = raf; + this.zero = zero; + this.maxElements = maxElements; + + readInitialData(); + } + + private void readInitialData() throws IOException { + raf.seek(0); + raf.readFully(buffer); + + long firstOffset; + long lastOffset; + + fileLength = readLong(buffer, 4); + elementCount = readInt(buffer, 12); + firstOffset = readLong(buffer, 16); + lastOffset = readLong(buffer, 24); + + if (fileLength > raf.length()) { + throw new IOException( + "File is truncated. Expected length: " + fileLength + ", Actual length: " + raf.length()); + } else if (fileLength <= headerLength) { + throw new IOException( + "File is corrupt; length stored in header (" + fileLength + ") is invalid."); + } + + first = readElement(firstOffset); + last = readElement(lastOffset); + } + + private void resetFile() throws IOException { + raf.close(); + file.delete(); + raf = initializeFromFile(file); + readInitialData(); + } + + /** + * Stores an {@code int} in the {@code byte[]}. The behavior is equivalent to calling {@link + * RandomAccessFile#writeInt}. + */ + private static void writeInt(byte[] buffer, int offset, int value) { + buffer[offset] = (byte) (value >> 24); + buffer[offset + 1] = (byte) (value >> 16); + buffer[offset + 2] = (byte) (value >> 8); + buffer[offset + 3] = (byte) value; + } + + /** Reads an {@code int} from the {@code byte[]}. */ + private static int readInt(byte[] buffer, int offset) { + return ((buffer[offset] & 0xff) << 24) + + ((buffer[offset + 1] & 0xff) << 16) + + ((buffer[offset + 2] & 0xff) << 8) + + (buffer[offset + 3] & 0xff); + } + + /** + * Stores an {@code long} in the {@code byte[]}. 
The behavior is equivalent to calling {@link + * RandomAccessFile#writeLong}. + */ + private static void writeLong(byte[] buffer, int offset, long value) { + buffer[offset] = (byte) (value >> 56); + buffer[offset + 1] = (byte) (value >> 48); + buffer[offset + 2] = (byte) (value >> 40); + buffer[offset + 3] = (byte) (value >> 32); + buffer[offset + 4] = (byte) (value >> 24); + buffer[offset + 5] = (byte) (value >> 16); + buffer[offset + 6] = (byte) (value >> 8); + buffer[offset + 7] = (byte) value; + } + + /** Reads an {@code long} from the {@code byte[]}. */ + private static long readLong(byte[] buffer, int offset) { + return ((buffer[offset] & 0xffL) << 56) + + ((buffer[offset + 1] & 0xffL) << 48) + + ((buffer[offset + 2] & 0xffL) << 40) + + ((buffer[offset + 3] & 0xffL) << 32) + + ((buffer[offset + 4] & 0xffL) << 24) + + ((buffer[offset + 5] & 0xffL) << 16) + + ((buffer[offset + 6] & 0xffL) << 8) + + (buffer[offset + 7] & 0xffL); + } + + /** + * Writes header atomically. The arguments contain the updated values. The class member fields + * should not have changed yet. This only updates the state in the file. It's up to the caller to + * update the class member variables *after* this call succeeds. Assumes segment writes are atomic + * in the underlying file system. + */ + private void writeHeader(long fileLength, int elementCount, long firstPosition, long lastPosition) + throws IOException { + raf.seek(0); + + writeInt(buffer, 0, VERSIONED_HEADER); + writeLong(buffer, 4, fileLength); + writeInt(buffer, 12, elementCount); + writeLong(buffer, 16, firstPosition); + writeLong(buffer, 24, lastPosition); + raf.write(buffer, 0, 32); + } + + Element readElement(long position) throws IOException { + if (position == 0) return Element.NULL; + boolean success = ringRead(position, buffer, 0, Element.HEADER_LENGTH); + if (!success) { + return Element.NULL; + } + int length = readInt(buffer, 0); + return new Element(position, length); + } + + /** Wraps the position if it exceeds the end of the file. */ + long wrapPosition(long position) { + return position < fileLength ? position : headerLength + position - fileLength; + } + + /** + * Writes count bytes from buffer to position in file. Automatically wraps write if position is + * past the end of the file or if buffer overlaps it. + * + * @param position in file to write to + * @param buffer to write from + * @param count # of bytes to write + */ + private void ringWrite(long position, byte[] buffer, int offset, int count) throws IOException { + position = wrapPosition(position); + if (position + count <= fileLength) { + raf.seek(position); + raf.write(buffer, offset, count); + } else { + // The write overlaps the EOF. + // # of bytes to write before the EOF. Guaranteed to be less than Integer.MAX_VALUE. + int beforeEof = (int) (fileLength - position); + raf.seek(position); + raf.write(buffer, offset, beforeEof); + raf.seek(headerLength); + raf.write(buffer, offset + beforeEof, count - beforeEof); + } + } + + private void ringErase(long position, long length) throws IOException { + while (length > 0) { + int chunk = (int) min(length, ZEROES.length); + ringWrite(position, ZEROES, 0, chunk); + length -= chunk; + position += chunk; + } + } + + /** + * Reads count bytes into buffer from file. Wraps if necessary. 
+ * + * @param position in file to read from + * @param buffer to read into + * @param count # of bytes to read + * @return true if the read was successful, false if the file is corrupt + */ + boolean ringRead(long position, byte[] buffer, int offset, int count) throws IOException { + try { + position = wrapPosition(position); + if (position + count <= fileLength) { + raf.seek(position); + raf.readFully(buffer, offset, count); + } else { + // The read overlaps the EOF. + // # of bytes to read before the EOF. Guaranteed to be less than Integer.MAX_VALUE. + int beforeEof = (int) (fileLength - position); + raf.seek(position); + raf.readFully(buffer, offset, beforeEof); + raf.seek(headerLength); + raf.readFully(buffer, offset + beforeEof, count - beforeEof); + } + return true; + } catch (EOFException e) { + // since EOFException inherits from IOException, we need to catch it explicitly + // and reset the file + resetFile(); + } catch (IOException e) { + throw e; + } catch (Throwable e) { + // most likely the file is corrupt, so we delete it and recreate, accepting data loss + resetFile(); + } + return false; + } + + /** + * Adds an element to the end of the queue. + * + * @param data to copy bytes from + */ + public void add(byte[] data) throws IOException { + add(data, 0, data.length); + } + + /** + * Adds an element to the end of the queue. + * + * @param data to copy bytes from + * @param offset to start from in buffer + * @param count number of bytes to copy + * @throws IndexOutOfBoundsException if {@code offset < 0} or {@code count < 0}, or if {@code + * offset + count} is bigger than the length of {@code buffer}. + */ + public void add(byte[] data, int offset, int count) throws IOException { + if (data == null) { + throw new NullPointerException("data == null"); + } + if ((offset | count) < 0 || count > data.length - offset) { + throw new IndexOutOfBoundsException(); + } + if (closed) throw new IllegalStateException("closed"); + + // If the queue is at full capacity, remove the oldest element first. + if (isAtFullCapacity()) { + remove(); + } + + expandIfNecessary(count); + + // Insert a new element after the current last element. + boolean wasEmpty = isEmpty(); + long position = + wasEmpty ? headerLength : wrapPosition(last.position + Element.HEADER_LENGTH + last.length); + Element newLast = new Element(position, count); + + // Write length. + writeInt(buffer, 0, count); + ringWrite(newLast.position, buffer, 0, Element.HEADER_LENGTH); + + // Write data. + ringWrite(newLast.position + Element.HEADER_LENGTH, data, offset, count); + + // Commit the addition. If wasEmpty, first == last. + long firstPosition = wasEmpty ? newLast.position : first.position; + writeHeader(fileLength, elementCount + 1, firstPosition, newLast.position); + last = newLast; + elementCount++; + modCount++; + if (wasEmpty) first = last; // first element + } + + private long usedBytes() { + if (elementCount == 0) return headerLength; + + if (last.position >= first.position) { + // Contiguous queue. + return (last.position - first.position) // all but last entry + + Element.HEADER_LENGTH + + last.length // last entry + + headerLength; + } else { + // tail < head. The queue wraps. + return last.position // buffer front + header + + Element.HEADER_LENGTH + + last.length // last entry + + fileLength + - first.position; // buffer end + } + } + + private long remainingBytes() { + return fileLength - usedBytes(); + } + + /** Returns true if this queue contains no entries. 
*/ + public boolean isEmpty() { + return elementCount == 0; + } + + /** + * If necessary, expands the file to accommodate an additional element of the given length. + * + * @param dataLength length of data being added + */ + private void expandIfNecessary(long dataLength) throws IOException { + long elementLength = Element.HEADER_LENGTH + dataLength; + long remainingBytes = remainingBytes(); + if (remainingBytes >= elementLength) return; + + // Expand. + long previousLength = fileLength; + long newLength; + // Double the length until we can fit the new data. + do { + remainingBytes += previousLength; + newLength = previousLength << 1; + previousLength = newLength; + } while (remainingBytes < elementLength); + + setLength(newLength); + + // Calculate the position of the tail end of the data in the ring buffer + long endOfLastElement = wrapPosition(last.position + Element.HEADER_LENGTH + last.length); + long count = 0; + // If the buffer is split, we need to make it contiguous + if (endOfLastElement <= first.position) { + FileChannel channel = raf.getChannel(); + channel.position(fileLength); // destination position + count = endOfLastElement - headerLength; + if (channel.transferTo(headerLength, count, channel) != count) { + throw new AssertionError("Copied insufficient number of bytes!"); + } + } + + // Commit the expansion. + if (last.position < first.position) { + long newLastPosition = fileLength + last.position - headerLength; + writeHeader(newLength, elementCount, first.position, newLastPosition); + last = new Element(newLastPosition, last.length); + } else { + writeHeader(newLength, elementCount, first.position, last.position); + } + + fileLength = newLength; + + if (zero) { + ringErase(headerLength, count); + } + } + + /** Sets the length of the file. */ + private void setLength(long newLength) throws IOException { + // Set new file length (considered metadata) and sync it to storage. + raf.setLength(newLength); + raf.getChannel().force(true); + } + + /** Reads the eldest element. Returns null if the queue is empty. */ + public @Nullable byte[] peek() throws IOException { + if (closed) throw new IllegalStateException("closed"); + if (isEmpty()) return null; + int length = first.length; + byte[] data = new byte[length]; + boolean success = ringRead(first.position + Element.HEADER_LENGTH, data, 0, length); + return success ? data : null; + } + + /** + * Returns an iterator over elements in this QueueFile. + * + *
<p>
The iterator disallows modifications to be made to the QueueFile during iteration. Removing + * elements from the head of the QueueFile is permitted during iteration using {@link + * Iterator#remove()}. + * + *
<p>
The iterator may throw an unchecked {@link IOException} during {@link Iterator#next()} or + * {@link Iterator#remove()}. + */ + @Override + public Iterator iterator() { + return new ElementIterator(); + } + + private final class ElementIterator implements Iterator { + /** Index of element to be returned by subsequent call to next. */ + int nextElementIndex = 0; + + /** Position of element to be returned by subsequent call to next. */ + private long nextElementPosition = first.position; + + /** + * The {@link #modCount} value that the iterator believes that the backing QueueFile should + * have. If this expectation is violated, the iterator has detected concurrent modification. + */ + int expectedModCount = modCount; + + ElementIterator() {} + + private void checkForComodification() { + if (modCount != expectedModCount) throw new ConcurrentModificationException(); + } + + @Override + public boolean hasNext() { + if (closed) throw new IllegalStateException("closed"); + checkForComodification(); + return nextElementIndex != elementCount; + } + + @Override + public byte[] next() { + if (closed) throw new IllegalStateException("closed"); + checkForComodification(); + if (isEmpty()) throw new NoSuchElementException(); + if (nextElementIndex >= elementCount) throw new NoSuchElementException(); + + try { + // Read the current element. + Element current = readElement(nextElementPosition); + byte[] buffer = new byte[current.length]; + nextElementPosition = wrapPosition(current.position + Element.HEADER_LENGTH); + boolean success = ringRead(nextElementPosition, buffer, 0, current.length); + if (!success) { + // make it run out of bounds immediately + nextElementIndex = elementCount; + return ZEROES; + } + + // Update the pointer to the next element. + nextElementPosition = + wrapPosition(current.position + Element.HEADER_LENGTH + current.length); + nextElementIndex++; + + // Return the read element. + return buffer; + } catch (IOException e) { + throw QueueFile.getSneakyThrowable(e); + } catch (OutOfMemoryError e) { + // most likely the file is corrupted, so we delete it and recreate, accepting data loss + try { + resetFile(); + // make it run out of bounds immediately + nextElementIndex = elementCount; + } catch (IOException ex) { + throw QueueFile.getSneakyThrowable(ex); + } + return ZEROES; + } + } + + @Override + public void remove() { + checkForComodification(); + + if (isEmpty()) throw new NoSuchElementException(); + if (nextElementIndex != 1) { + throw new UnsupportedOperationException("Removal is only permitted from the head."); + } + + try { + QueueFile.this.remove(); + } catch (IOException e) { + throw QueueFile.getSneakyThrowable(e); + } + + expectedModCount = modCount; + nextElementIndex--; + } + } + + /** Returns the number of elements in this queue. */ + public int size() { + return elementCount; + } + + /** + * Removes the eldest element. + * + * @throws NoSuchElementException if the queue is empty + */ + public void remove() throws IOException { + remove(1); + } + + /** + * Removes the eldest {@code n} elements. 
+ * + * @throws NoSuchElementException if the queue is empty + */ + public void remove(int n) throws IOException { + if (n < 0) { + throw new IllegalArgumentException("Cannot remove negative (" + n + ") number of elements."); + } + if (n == 0) { + return; + } + if (n == elementCount) { + clear(); + return; + } + if (isEmpty()) { + throw new NoSuchElementException(); + } + if (n > elementCount) { + throw new IllegalArgumentException( + "Cannot remove more elements (" + n + ") than present in queue (" + elementCount + ")."); + } + + long eraseStartPosition = first.position; + long eraseTotalLength = 0; + + // Read the position and length of the new first element. + long newFirstPosition = first.position; + int newFirstLength = first.length; + for (int i = 0; i < n; i++) { + eraseTotalLength += Element.HEADER_LENGTH + newFirstLength; + newFirstPosition = wrapPosition(newFirstPosition + Element.HEADER_LENGTH + newFirstLength); + boolean success = ringRead(newFirstPosition, buffer, 0, Element.HEADER_LENGTH); + if (!success) { + return; + } + newFirstLength = readInt(buffer, 0); + } + + // Commit the header. + writeHeader(fileLength, elementCount - n, newFirstPosition, last.position); + elementCount -= n; + modCount++; + first = new Element(newFirstPosition, newFirstLength); + + if (zero) { + ringErase(eraseStartPosition, eraseTotalLength); + } + } + + /** Clears this queue. Truncates the file to the initial size. */ + public void clear() throws IOException { + if (closed) throw new IllegalStateException("closed"); + + // Commit the header. + writeHeader(INITIAL_LENGTH, 0, 0, 0); + + if (zero) { + // Zero out data. + raf.seek(headerLength); + raf.write(ZEROES, 0, INITIAL_LENGTH - headerLength); + } + + elementCount = 0; + first = Element.NULL; + last = Element.NULL; + if (fileLength > INITIAL_LENGTH) setLength(INITIAL_LENGTH); + fileLength = INITIAL_LENGTH; + modCount++; + } + + /** + * Returns {@code true} if the capacity limit of this queue has been reached, i.e. the number of + * elements stored in the queue equals its maximum size. + * + * @return {@code true} if the capacity limit has been reached, {@code false} otherwise + */ + public boolean isAtFullCapacity() { + if (maxElements == -1) { + // unspecified + return false; + } + return size() == maxElements; + } + + /** The underlying {@link File} backing this queue. */ + public File file() { + return file; + } + + @Override + public void close() throws IOException { + closed = true; + raf.close(); + } + + @Override + public String toString() { + return "QueueFile{" + + "file=" + + file + + ", zero=" + + zero + + ", length=" + + fileLength + + ", size=" + + elementCount + + ", first=" + + first + + ", last=" + + last + + '}'; + } + + /** A pointer to an element. */ + static final class Element { + static final Element NULL = new Element(0, 0); + + /** Length of element header in bytes. */ + static final int HEADER_LENGTH = 4; + + /** Position in file. */ + final long position; + + /** The length of the data. */ + final int length; + + /** + * Constructs a new element. + * + * @param position within file + * @param length of data + */ + Element(long position, int length) { + this.position = position; + this.length = length; + } + + @Override + public String toString() { + return getClass().getSimpleName() + "[position=" + position + ", length=" + length + "]"; + } + } + + /** Fluent API for creating {@link QueueFile} instances. 
*/ + public static final class Builder { + final File file; + boolean zero = true; + int size = -1; + + /** Start constructing a new queue backed by the given file. */ + public Builder(File file) { + if (file == null) { + throw new NullPointerException("file == null"); + } + this.file = file; + } + + /** When true, removing an element will also overwrite data with zero bytes. */ + public Builder zero(boolean zero) { + this.zero = zero; + return this; + } + + /** The maximum number of elements this queue can hold before wrapping around. */ + public Builder size(int size) { + this.size = size; + return this; + } + + /** + * Constructs a new queue backed by the given builder. Only one instance should access a given + * file at a time. + */ + public QueueFile build() throws IOException { + RandomAccessFile raf = initializeFromFile(file); + QueueFile qf = null; + try { + qf = new QueueFile(file, raf, zero, size); + return qf; + } finally { + if (qf == null) { + raf.close(); + } + } + } + } + + /** + * Use this to throw checked exceptions from iterator methods that do not declare that they throw + * checked exceptions. + */ + @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"}) + static T getSneakyThrowable(Throwable t) throws T { + throw (T) t; + } +} diff --git a/sentry/src/test/java/io/sentry/cache/CacheUtilsTest.kt b/sentry/src/test/java/io/sentry/cache/CacheUtilsTest.kt index ba42810cd95..daf60e679d5 100644 --- a/sentry/src/test/java/io/sentry/cache/CacheUtilsTest.kt +++ b/sentry/src/test/java/io/sentry/cache/CacheUtilsTest.kt @@ -34,6 +34,16 @@ internal class CacheUtilsTest { ) assertEquals("\"Hallo!\"", file.readText()) + + // test overwrite + CacheUtils.store( + SentryOptions().apply { cacheDirPath = cacheDir }, + "Hallo 2!", + "stuff", + "test.json" + ) + + assertEquals("\"Hallo 2!\"", file.readText()) } @Test diff --git a/sentry/src/test/java/io/sentry/cache/PersistingScopeObserverTest.kt b/sentry/src/test/java/io/sentry/cache/PersistingScopeObserverTest.kt index e1927438e59..631e18cf1d5 100644 --- a/sentry/src/test/java/io/sentry/cache/PersistingScopeObserverTest.kt +++ b/sentry/src/test/java/io/sentry/cache/PersistingScopeObserverTest.kt @@ -2,7 +2,6 @@ package io.sentry.cache import io.sentry.Breadcrumb import io.sentry.DateUtils -import io.sentry.JsonDeserializer import io.sentry.Scope import io.sentry.SentryLevel import io.sentry.SentryOptions @@ -56,13 +55,12 @@ class DeletedEntityProvider(private val provider: (Scope) -> T?) { } @RunWith(Parameterized::class) -class PersistingScopeObserverTest( +class PersistingScopeObserverTest( private val entity: T, private val store: StoreScopeValue, private val filename: String, private val delete: DeleteScopeValue, - private val deletedEntity: DeletedEntityProvider, - private val elementDeserializer: JsonDeserializer? + private val deletedEntity: DeletedEntityProvider ) { @get:Rule @@ -89,19 +87,19 @@ class PersistingScopeObserverTest( val sut = fixture.getSut(tmpDir) store(entity, sut, fixture.scope) - val persisted = read() + val persisted = sut.read() assertEquals(entity, persisted) delete(sut, fixture.scope) - val persistedAfterDeletion = read() + val persistedAfterDeletion = sut.read() assertEquals(deletedEntity(fixture.scope), persistedAfterDeletion) } - private fun read(): T? = PersistingScopeObserver.read( + private fun PersistingScopeObserver.read(): Any? 
= read( fixture.options, filename, - entity!!::class.java, - elementDeserializer + // need to cast breadcrumbs to a regular List, not kotlin lists + if (entity!!::class.java.name.contains("List")) List::class.java else entity!!::class.java ) companion object { @@ -115,8 +113,7 @@ class PersistingScopeObserverTest( StoreScopeValue { user, _ -> setUser(user) }, USER_FILENAME, DeleteScopeValue { setUser(null) }, - DeletedEntityProvider { null }, - null + DeletedEntityProvider { null } ) private fun breadcrumbs(): Array = arrayOf( @@ -124,11 +121,29 @@ class PersistingScopeObserverTest( Breadcrumb.navigation("one", "two"), Breadcrumb.userInteraction("click", "viewId", "viewClass") ), - StoreScopeValue> { breadcrumbs, _ -> setBreadcrumbs(breadcrumbs) }, + StoreScopeValue> { breadcrumbs, _ -> + breadcrumbs.forEach { addBreadcrumb(it) } + }, + BREADCRUMBS_FILENAME, + DeleteScopeValue { setBreadcrumbs(emptyList()) }, + DeletedEntityProvider { emptyList() } + ) + + private fun legacyBreadcrumbs(): Array = arrayOf( + emptyList(), + StoreScopeValue> { _, scope -> + PersistingScopeObserver.store( + scope.options, + listOf( + Breadcrumb.navigation("one", "two"), + Breadcrumb.userInteraction("click", "viewId", "viewClass") + ), + BREADCRUMBS_FILENAME + ) + }, BREADCRUMBS_FILENAME, DeleteScopeValue { setBreadcrumbs(emptyList()) }, - DeletedEntityProvider { emptyList() }, - Breadcrumb.Deserializer() + DeletedEntityProvider { emptyList() } ) private fun tags(): Array = arrayOf( @@ -139,8 +154,7 @@ class PersistingScopeObserverTest( StoreScopeValue> { tags, _ -> setTags(tags) }, TAGS_FILENAME, DeleteScopeValue { setTags(emptyMap()) }, - DeletedEntityProvider { emptyMap() }, - null + DeletedEntityProvider { emptyMap() } ) private fun extras(): Array = arrayOf( @@ -152,8 +166,7 @@ class PersistingScopeObserverTest( StoreScopeValue> { extras, _ -> setExtras(extras) }, EXTRAS_FILENAME, DeleteScopeValue { setExtras(emptyMap()) }, - DeletedEntityProvider { emptyMap() }, - null + DeletedEntityProvider { emptyMap() } ) private fun request(): Array = arrayOf( @@ -168,8 +181,7 @@ class PersistingScopeObserverTest( StoreScopeValue { request, _ -> setRequest(request) }, REQUEST_FILENAME, DeleteScopeValue { setRequest(null) }, - DeletedEntityProvider { null }, - null + DeletedEntityProvider { null } ) private fun fingerprint(): Array = arrayOf( @@ -177,8 +189,7 @@ class PersistingScopeObserverTest( StoreScopeValue> { fingerprint, _ -> setFingerprint(fingerprint) }, FINGERPRINT_FILENAME, DeleteScopeValue { setFingerprint(emptyList()) }, - DeletedEntityProvider { emptyList() }, - null + DeletedEntityProvider { emptyList() } ) private fun level(): Array = arrayOf( @@ -186,8 +197,7 @@ class PersistingScopeObserverTest( StoreScopeValue { level, _ -> setLevel(level) }, LEVEL_FILENAME, DeleteScopeValue { setLevel(null) }, - DeletedEntityProvider { null }, - null + DeletedEntityProvider { null } ) private fun transaction(): Array = arrayOf( @@ -195,8 +205,7 @@ class PersistingScopeObserverTest( StoreScopeValue { transaction, _ -> setTransaction(transaction) }, TRANSACTION_FILENAME, DeleteScopeValue { setTransaction(null) }, - DeletedEntityProvider { null }, - null + DeletedEntityProvider { null } ) private fun trace(): Array = arrayOf( @@ -204,8 +213,7 @@ class PersistingScopeObserverTest( StoreScopeValue { trace, scope -> setTrace(trace, scope) }, TRACE_FILENAME, DeleteScopeValue { scope -> setTrace(null, scope) }, - DeletedEntityProvider { scope -> scope.propagationContext.toSpanContext() }, - null + DeletedEntityProvider { 
scope -> scope.propagationContext.toSpanContext() } ) private fun contexts(): Array = arrayOf( @@ -269,8 +277,7 @@ class PersistingScopeObserverTest( StoreScopeValue { contexts, _ -> setContexts(contexts) }, CONTEXTS_FILENAME, DeleteScopeValue { setContexts(Contexts()) }, - DeletedEntityProvider { Contexts() }, - null + DeletedEntityProvider { Contexts() } ) private fun replayId(): Array = arrayOf( @@ -278,8 +285,7 @@ class PersistingScopeObserverTest( StoreScopeValue { replayId, _ -> setReplayId(SentryId(replayId)) }, REPLAY_FILENAME, DeleteScopeValue { setReplayId(SentryId.EMPTY_ID) }, - DeletedEntityProvider { SentryId.EMPTY_ID.toString() }, - null + DeletedEntityProvider { SentryId.EMPTY_ID.toString() } ) @JvmStatic @@ -288,6 +294,7 @@ class PersistingScopeObserverTest( return listOf( user(), breadcrumbs(), + legacyBreadcrumbs(), tags(), extras(), request(), diff --git a/sentry/src/test/java/io/sentry/cache/tape/CorruptQueueFileTest.kt b/sentry/src/test/java/io/sentry/cache/tape/CorruptQueueFileTest.kt new file mode 100644 index 00000000000..1e5e0b03a0b --- /dev/null +++ b/sentry/src/test/java/io/sentry/cache/tape/CorruptQueueFileTest.kt @@ -0,0 +1,43 @@ +package io.sentry.cache.tape + +import org.junit.Before +import org.junit.Rule +import org.junit.Test +import org.junit.rules.TemporaryFolder +import java.io.File +import java.nio.file.Files +import java.nio.file.Paths +import kotlin.test.assertEquals + +class CorruptQueueFileTest { + + @get:Rule + val folder = TemporaryFolder() + private lateinit var file: File + + @Before + fun setUp() { + val parent = folder.root + file = File(parent, "queue-file") + } + + @Test + fun `does not fail to operate with a corrupt file`() { + val testFile = this::class.java.classLoader.getResource("corrupt_queue_file.txt")!! + Files.copy(Paths.get(testFile.toURI()), file.outputStream()) + + val queueFile = QueueFile.Builder(file).zero(true).build() + val iterator = queueFile.iterator() + while (iterator.hasNext()) { + iterator.next() + } + + queueFile.add("test".toByteArray()) + assertEquals(1, queueFile.size()) + + queueFile.peek() + + queueFile.remove() + assertEquals(0, queueFile.size()) + } +} diff --git a/sentry/src/test/java/io/sentry/cache/tape/ObjectQueueTest.kt b/sentry/src/test/java/io/sentry/cache/tape/ObjectQueueTest.kt new file mode 100644 index 00000000000..628db5d57bc --- /dev/null +++ b/sentry/src/test/java/io/sentry/cache/tape/ObjectQueueTest.kt @@ -0,0 +1,252 @@ +/* + * Adapted from: https://github.com/square/tape/tree/445cd3fd0a7b3ec48c9ea3e0e86663fe6d3735d8/tape/src/test/java/com/squareup/tape2 + * + * Copyright (C) 2010 Square, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.sentry.cache.tape + +import io.sentry.cache.tape.ObjectQueue.Converter +import io.sentry.cache.tape.QueueFile.Builder +import org.junit.Before +import org.junit.Rule +import org.junit.Test +import org.junit.rules.TemporaryFolder +import java.io.File +import java.io.IOException +import java.io.OutputStream +import kotlin.test.assertEquals +import kotlin.test.assertFalse +import kotlin.test.assertTrue +import kotlin.test.fail + +class ObjectQueueTest { + enum class QueueFactory { + FILE { + override fun create(queueFile: QueueFile, converter: Converter): ObjectQueue { + return ObjectQueue.create(queueFile, converter) + } + }; + + abstract fun create(queueFile: QueueFile, converter: Converter): ObjectQueue + } + + @get:Rule + val folder = TemporaryFolder() + private lateinit var queue: ObjectQueue + + @Before + fun setUp() { + val parent = folder.root + val file = File(parent, "object-queue") + val queueFile = Builder(file).build() + queue = QueueFactory.FILE.create(queueFile, StringConverter()) + + queue.add("one") + queue.add("two") + queue.add("three") + } + + @Test + fun size() { + assertEquals(queue.size(), 3) + } + + @Test + fun peek() { + assertEquals(queue.peek(), "one") + } + + @Test + fun peekMultiple() { + assertEquals(queue.peek(2), listOf("one", "two")) + } + + @Test + fun peekMaxCanExceedQueueDepth() { + assertEquals(queue.peek(6), listOf("one", "two", "three")) + } + + @Test + fun asList() { + assertEquals(queue.asList(), listOf("one", "two", "three")) + } + + @Test + fun remove() { + queue.remove() + + assertEquals(queue.asList(), listOf("two", "three")) + } + + @Test + fun removeMultiple() { + queue.remove(2) + + assertEquals(queue.asList(), listOf("three")) + } + + @Test + fun clear() { + queue.clear() + + assertEquals(queue.size(), 0) + } + + @Test + fun isEmpty() { + assertFalse(queue.isEmpty) + + queue.clear() + + assertTrue(queue.isEmpty) + } + + @Test + fun testIterator() { + val saw: MutableList = ArrayList() + for (pojo in queue) { + saw.add(pojo) + } + assertEquals(saw, listOf("one", "two", "three")) + } + + @Test + fun testIteratorNextThrowsWhenEmpty() { + queue.clear() + val iterator: Iterator = queue.iterator() + + try { + iterator.next() + fail() + } catch (ignored: NoSuchElementException) { + } + } + + @Test + fun testIteratorNextThrowsWhenExhausted() { + val iterator: Iterator = queue.iterator() + iterator.next() + iterator.next() + iterator.next() + + try { + iterator.next() + fail() + } catch (ignored: NoSuchElementException) { + } + } + + @Test + fun testIteratorRemove() { + val iterator = queue.iterator() + + iterator.next() + iterator.remove() + assertEquals(queue.asList(), listOf("two", "three")) + + iterator.next() + iterator.remove() + assertEquals(queue.asList(), listOf("three")) + } + + @Test + fun testIteratorRemoveDisallowsConcurrentModification() { + val iterator = queue.iterator() + iterator.next() + queue.remove() + + try { + iterator.remove() + fail() + } catch (ignored: ConcurrentModificationException) { + } + } + + @Test + fun testIteratorHasNextDisallowsConcurrentModification() { + val iterator: Iterator = queue.iterator() + iterator.next() + queue.remove() + + try { + iterator.hasNext() + fail() + } catch (ignored: ConcurrentModificationException) { + } + } + + @Test + fun testIteratorDisallowsConcurrentModificationWithClear() { + val iterator: Iterator = queue.iterator() + iterator.next() + queue.clear() + + try { + iterator.hasNext() + fail() + } catch (ignored: ConcurrentModificationException) { + } + } + + @Test + fun 
testIteratorOnlyRemovesFromHead() { + val iterator = queue.iterator() + iterator.next() + iterator.next() + + try { + iterator.remove() + fail() + } catch (ex: UnsupportedOperationException) { + assertEquals(ex.message, "Removal is only permitted from the head.") + } + } + + @Test + fun iteratorThrowsIOException() { + val parent = folder.root + val file = File(parent, "object-queue") + val queueFile = Builder(file).build() + val queue = ObjectQueue.create( + queueFile, + object : Converter { + override fun from(bytes: ByteArray): String { + throw IOException() + } + + override fun toStream(o: Any, bytes: OutputStream) { + } + } + ) + queue.add(Any()) + val iterator = queue.iterator() + try { + iterator.next() + fail() + } catch (ioe: Exception) { + assertTrue(ioe is IOException) + } + } + + internal class StringConverter : Converter { + override fun from(bytes: ByteArray): String { + return String(bytes, charset("UTF-8")) + } + + override fun toStream(s: String, os: OutputStream) { + os.write(s.toByteArray(charset("UTF-8"))) + } + } +} diff --git a/sentry/src/test/java/io/sentry/cache/tape/QueueFileTest.kt b/sentry/src/test/java/io/sentry/cache/tape/QueueFileTest.kt new file mode 100644 index 00000000000..8ece592c684 --- /dev/null +++ b/sentry/src/test/java/io/sentry/cache/tape/QueueFileTest.kt @@ -0,0 +1,730 @@ +/* + * Adapted from: https://github.com/square/tape/tree/445cd3fd0a7b3ec48c9ea3e0e86663fe6d3735d8/tape/src/test/java/com/squareup/tape2 + * + * Copyright (C) 2010 Square, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.sentry.cache.tape + +import io.sentry.cache.tape.QueueFile.Builder +import io.sentry.cache.tape.QueueFile.Element +import okio.BufferedSource +import okio.Okio +import org.junit.Assert +import org.junit.Assert.assertArrayEquals +import org.junit.Before +import org.junit.Rule +import org.junit.Test +import org.junit.rules.TemporaryFolder +import java.io.File +import java.io.IOException +import java.io.RandomAccessFile +import java.util.ArrayDeque +import java.util.Queue +import java.util.logging.Logger +import kotlin.test.assertEquals +import kotlin.test.assertNull +import kotlin.test.assertTrue +import kotlin.test.fail + +/** + * Tests for QueueFile. + * + * @author Bob Lee (bob@squareup.com) + */ +class QueueFileTest { + private val headerLength = 32 + + @get:Rule + val folder = TemporaryFolder() + private lateinit var file: File + + private fun newQueueFile(raf: RandomAccessFile): QueueFile { + return QueueFile(this.file, raf, true, -1) + } + + private fun newQueueFile(zero: Boolean = true, size: Int = -1): QueueFile { + return Builder(file).zero(zero).size(size).build() + } + + @Before + fun setUp() { + val parent = folder.root + file = File(parent, "queue-file") + } + + @Test + fun testAddOneElement() { + // This test ensures that we update 'first' correctly. 
+ var queue = newQueueFile() + val expected = values[253] + queue.add(expected) + assertArrayEquals(queue.peek(), expected) + queue.close() + queue = newQueueFile() + assertArrayEquals(queue.peek(), expected) + } + + @Test + fun testClearErases() { + val queue = newQueueFile() + val expected = values[253] + queue.add(expected) + + // Confirm that the data was in the file before we cleared. + val data = ByteArray(expected!!.size) + queue.raf.seek(headerLength.toLong() + Element.HEADER_LENGTH) + queue.raf.readFully(data, 0, expected.size) + assertArrayEquals(data, expected) + + queue.clear() + + // Should have been erased. + queue.raf.seek(headerLength.toLong() + Element.HEADER_LENGTH) + queue.raf.readFully(data, 0, expected.size) + assertArrayEquals(data, ByteArray(expected.size)) + } + + @Test + fun testClearDoesNotCorrupt() { + var queue = newQueueFile() + val stuff = values[253] + queue.add(stuff) + queue.clear() + + queue = newQueueFile() + assertTrue(queue.isEmpty) + assertNull(queue.peek()) + + queue.add(values[25]) + assertArrayEquals(queue.peek(), values[25]) + } + + @Test + fun removeErasesEagerly() { + val queue = newQueueFile() + + val firstStuff = values[127] + queue.add(firstStuff) + + val secondStuff = values[253] + queue.add(secondStuff) + + // Confirm that first stuff was in the file before we remove. + val data = ByteArray(firstStuff!!.size) + queue.raf.seek((headerLength + Element.HEADER_LENGTH).toLong()) + queue.raf.readFully(data, 0, firstStuff.size) + assertArrayEquals(data, firstStuff) + + queue.remove() + + // Next record is intact + assertArrayEquals(queue.peek(), secondStuff) + + // First should have been erased. + queue.raf.seek((headerLength + Element.HEADER_LENGTH).toLong()) + queue.raf.readFully(data, 0, firstStuff.size) + assertArrayEquals(data, ByteArray(firstStuff.size)) + } + + @Test + fun testZeroSizeInHeaderThrows() { + val emptyFile = RandomAccessFile(file, "rwd") + emptyFile.setLength(QueueFile.INITIAL_LENGTH.toLong()) + emptyFile.channel.force(true) + emptyFile.close() + + try { + newQueueFile() + fail("Should have thrown about bad header length") + } catch (ex: IOException) { + assertEquals(ex.message, "File is corrupt; length stored in header (0) is invalid.") + } + } + + @Test + fun testSizeLessThanHeaderThrows() { + val emptyFile = RandomAccessFile(file, "rwd") + emptyFile.setLength(QueueFile.INITIAL_LENGTH.toLong()) + emptyFile.writeInt(-0x7fffffff) + emptyFile.writeLong((headerLength - 1).toLong()) + emptyFile.channel.force(true) + emptyFile.close() + + try { + newQueueFile() + fail() + } catch (ex: IOException) { + assertEquals(ex.message, "File is corrupt; length stored in header (31) is invalid.") + } + } + + @Test + fun testNegativeSizeInHeaderThrows() { + val emptyFile = RandomAccessFile(file, "rwd") + emptyFile.seek(0) + emptyFile.writeInt(-2147483648) + emptyFile.setLength(QueueFile.INITIAL_LENGTH.toLong()) + emptyFile.channel.force(true) + emptyFile.close() + + try { + newQueueFile() + fail("Should have thrown about bad header length") + } catch (ex: IOException) { + assertEquals(ex.message, "File is corrupt; length stored in header (0) is invalid.") + } + } + + @Test + fun removeMultipleDoesNotCorrupt() { + var queue = newQueueFile() + for (i in 0..9) { + queue.add(values[i]) + } + + queue.remove(1) + assertEquals(queue.size(), 9) + assertArrayEquals(queue.peek(), values[1]) + + queue.remove(3) + queue = newQueueFile() + assertEquals(queue.size(), 6) + assertArrayEquals(queue.peek(), values[4]) + + queue.remove(6) + 
assertTrue(queue.isEmpty) + assertNull(queue.peek()) + } + + @Test + fun removeDoesNotCorrupt() { + var queue = newQueueFile() + + queue.add(values[127]) + val secondStuff = values[253] + queue.add(secondStuff) + queue.remove() + + queue = newQueueFile() + assertArrayEquals(queue.peek(), secondStuff) + } + + @Test + fun removeFromEmptyFileThrows() { + val queue = newQueueFile() + + try { + queue.remove() + fail("Should have thrown about removing from empty file.") + } catch (ignored: NoSuchElementException) { + } + } + + @Test + fun removeZeroFromEmptyFileDoesNothing() { + val queue = newQueueFile() + queue.remove(0) + assertTrue(queue.isEmpty) + } + + @Test + fun removeNegativeNumberOfElementsThrows() { + val queue = newQueueFile() + queue.add(values[127]) + + try { + queue.remove(-1) + fail("Should have thrown about removing negative number of elements.") + } catch (ex: IllegalArgumentException) { + assertEquals(ex.message, "Cannot remove negative (-1) number of elements.") + } + } + + @Test + fun removeZeroElementsDoesNothing() { + val queue = newQueueFile() + queue.add(values[127]) + + queue.remove(0) + assertEquals(queue.size(), 1) + } + + @Test + fun removeBeyondQueueSizeElementsThrows() { + val queue = newQueueFile() + queue.add(values[127]) + + try { + queue.remove(10) + fail("Should have thrown about removing too many elements.") + } catch (ex: IllegalArgumentException) { + assertEquals(ex.message, "Cannot remove more elements (10) than present in queue (1).") + } + } + + @Test + fun removingBigDamnBlocksErasesEffectively() { + val bigBoy = ByteArray(7000) + var i = 0 + while (i < 7000) { + System.arraycopy(values[100], 0, bigBoy, i, values[100]!!.size) + i += 100 + } + + val queue = newQueueFile() + + queue.add(bigBoy) + val secondStuff = values[123] + queue.add(secondStuff) + + // Confirm that bigBoy was in the file before we remove. + val data = ByteArray(bigBoy.size) + queue.raf.seek((headerLength + Element.HEADER_LENGTH).toLong()) + queue.raf.readFully(data, 0, bigBoy.size) + assertArrayEquals(data, bigBoy) + + queue.remove() + + // Next record is intact + assertArrayEquals(queue.peek(), secondStuff) + + // First should have been erased. + queue.raf.seek((headerLength + Element.HEADER_LENGTH).toLong()) + queue.raf.readFully(data, 0, bigBoy.size) + assertArrayEquals(data, ByteArray(bigBoy.size)) + } + + @Test + fun testAddAndRemoveElements() { + val start = System.nanoTime() + + val expected: Queue = ArrayDeque() + + for (round in 0..4) { + val queue = newQueueFile() + for (i in 0 until N) { + queue.add(values[i]) + expected.add(values[i]) + } + + // Leave N elements in round N, 15 total for 5 rounds. Removing all the + // elements would be like starting with an empty queue. + for (i in 0 until N - round - 1) { + assertArrayEquals(queue.peek(), expected.remove()) + queue.remove() + } + queue.close() + } + + // Remove and validate remaining 15 elements. + val queue = newQueueFile() + assertEquals(queue.size(), 15) + assertEquals(queue.size(), expected.size) + while (!expected.isEmpty()) { + assertArrayEquals(queue.peek(), expected.remove()) + queue.remove() + } + queue.close() + + // length() returns 0, but I checked the size w/ 'ls', and it is correct. 
+ // assertEquals(65536, file.length()); + logger.info("Ran in " + ((System.nanoTime() - start) / 1000000) + "ms.") + } + + @Test + fun testFailedAdd() { + var queueFile = newQueueFile() + queueFile.add(values[253]) + queueFile.close() + + val braf = BrokenRandomAccessFile(file, "rwd") + queueFile = newQueueFile(braf) + + try { + queueFile.add(values[252]) + Assert.fail() + } catch (e: IOException) { /* expected */ + } + + braf.rejectCommit = false + + // Allow a subsequent add to succeed. + queueFile.add(values[251]) + + queueFile.close() + + queueFile = newQueueFile() + assertEquals(queueFile.size(), 2) + assertArrayEquals(queueFile.peek(), values[253]) + queueFile.remove() + assertArrayEquals(queueFile.peek(), values[251]) + } + + @Test + fun testFailedRemoval() { + var queueFile = newQueueFile() + queueFile.add(values[253]) + queueFile.close() + + val braf = BrokenRandomAccessFile(file, "rwd") + queueFile = newQueueFile(braf) + + try { + queueFile.remove() + fail() + } catch (e: IOException) { /* expected */ + } + + queueFile.close() + + queueFile = newQueueFile() + assertEquals(queueFile.size(), 1) + assertArrayEquals(queueFile.peek(), values[253]) + + queueFile.add(values[99]) + queueFile.remove() + assertArrayEquals(queueFile.peek(), values[99]) + } + + @Test + fun testFailedExpansion() { + var queueFile = newQueueFile() + queueFile.add(values[253]) + queueFile.close() + + val braf = BrokenRandomAccessFile(file, "rwd") + queueFile = newQueueFile(braf) + + try { + // This should trigger an expansion which should fail. + queueFile.add(ByteArray(8000)) + fail() + } catch (e: IOException) { /* expected */ + } + + queueFile.close() + + queueFile = newQueueFile() + assertEquals(queueFile.size(), 1) + assertArrayEquals(queueFile.peek(), values[253]) + assertEquals(queueFile.fileLength, 4096) + + queueFile.add(values[99]) + queueFile.remove() + assertArrayEquals(queueFile.peek(), values[99]) + } + + @Test + fun removingElementZeroesData() { + val queueFile = newQueueFile(true) + queueFile.add(values[4]) + queueFile.remove() + queueFile.close() + + val source: BufferedSource = Okio.buffer(Okio.source(file)) + source.skip(headerLength.toLong()) + source.skip(Element.HEADER_LENGTH.toLong()) + assertEquals(source.readByteString(4).hex(), "00000000") + } + + @Test + fun removingElementDoesNotZeroData() { + val queueFile = newQueueFile(false) + queueFile.add(values[4]) + queueFile.remove() + queueFile.close() + + val source: BufferedSource = Okio.buffer(Okio.source(file)) + source.skip(headerLength.toLong()) + source.skip(Element.HEADER_LENGTH.toLong()) + assertEquals(source.readByteString(4).hex(), "04030201") + + source.close() + } + + /** + * Exercise a bug where opening a queue whose first or last element's header + * was non contiguous throws an [java.io.EOFException]. + */ + @Test + fun testReadHeadersFromNonContiguousQueueWorks() { + val queueFile = newQueueFile() + + // Fill the queue up to `length - 2` (i.e. remainingBytes() == 2). + for (i in 0..14) { + queueFile.add(values[N - 1]) + } + queueFile.add(values[219]) + + // Remove first item so we have room to add another one without growing the file. + queueFile.remove() + + // Add any element element and close the queue. + queueFile.add(values[6]) + val queueSize = queueFile.size() + queueFile.close() + + // File should not be corrupted. 
+ val queueFile2 = newQueueFile() + assertEquals(queueFile2.size(), queueSize) + } + + @Test + fun testIterator() { + val data = values[10] + + for (i in 0..9) { + val queueFile = newQueueFile() + for (j in 0 until i) { + queueFile.add(data) + } + + var saw = 0 + for (element in queueFile) { + assertArrayEquals(element, data) + saw++ + } + assertEquals(saw, i) + queueFile.close() + file!!.delete() + } + } + + @Test + fun testIteratorNextThrowsWhenEmpty() { + val queueFile = newQueueFile() + + val iterator: Iterator = queueFile.iterator() + + try { + iterator.next() + fail() + } catch (ignored: NoSuchElementException) { + } + } + + @Test + fun testIteratorNextThrowsWhenExhausted() { + val queueFile = newQueueFile() + queueFile.add(values[0]) + + val iterator: Iterator = queueFile.iterator() + iterator.next() + + try { + iterator.next() + fail() + } catch (ignored: NoSuchElementException) { + } + } + + @Test + fun testIteratorRemove() { + val queueFile = newQueueFile() + for (i in 0..14) { + queueFile.add(values[i]) + } + + val iterator = queueFile.iterator() + while (iterator.hasNext()) { + iterator.next() + iterator.remove() + } + + assertTrue(queueFile.isEmpty) + } + + @Test + fun testIteratorRemoveDisallowsConcurrentModification() { + val queueFile = newQueueFile() + for (i in 0..14) { + queueFile.add(values[i]) + } + + val iterator = queueFile.iterator() + iterator.next() + queueFile.remove() + try { + iterator.remove() + fail() + } catch (ignored: ConcurrentModificationException) { + } + } + + @Test + fun testIteratorHasNextDisallowsConcurrentModification() { + val queueFile = newQueueFile() + for (i in 0..14) { + queueFile.add(values[i]) + } + + val iterator: Iterator = queueFile.iterator() + iterator.next() + queueFile.remove() + try { + iterator.hasNext() + fail() + } catch (ignored: ConcurrentModificationException) { + } + } + + @Test + fun testIteratorDisallowsConcurrentModificationWithClear() { + val queueFile = newQueueFile() + for (i in 0..14) { + queueFile.add(values[i]) + } + + val iterator: Iterator = queueFile.iterator() + iterator.next() + queueFile.clear() + try { + iterator.hasNext() + fail() + } catch (ignored: ConcurrentModificationException) { + } + } + + @Test + fun testIteratorOnlyRemovesFromHead() { + val queueFile = newQueueFile() + for (i in 0..14) { + queueFile.add(values[i]) + } + + val iterator = queueFile.iterator() + iterator.next() + iterator.next() + + try { + iterator.remove() + fail() + } catch (ex: UnsupportedOperationException) { + assertEquals(ex.message, "Removal is only permitted from the head.") + } + } + + @Test + fun iteratorThrowsIOException() { + var queueFile = newQueueFile() + queueFile.add(values[253]) + queueFile.close() + + class BrokenRandomAccessFile(file: File?, mode: String?) 
: RandomAccessFile(file, mode) { + var fail: Boolean = false + + override fun write(b: ByteArray, off: Int, len: Int) { + if (fail) { + throw IOException() + } + super.write(b, off, len) + } + + override fun read(b: ByteArray, off: Int, len: Int): Int { + if (fail) { + throw IOException() + } + return super.read(b, off, len) + } + } + + val braf = BrokenRandomAccessFile(file, "rwd") + queueFile = newQueueFile(braf) + val iterator = queueFile.iterator() + + braf.fail = true + try { + iterator.next() + fail() + } catch (ioe: Exception) { + assertTrue(ioe is IOException) + } + + braf.fail = false + iterator.next() + + braf.fail = true + try { + iterator.remove() + fail() + } catch (ioe: Exception) { + assertTrue(ioe is IOException) + } + } + + @Test + fun queueToString() { + val queueFile = newQueueFile() + for (i in 0..14) { + queueFile.add(values[i]) + } + + assertTrue( + queueFile.toString().contains( + "zero=true, length=4096," + + " size=15," + + " first=Element[position=32, length=0], last=Element[position=179, length=14]}" + ) + ) + } + + @Test + fun `wraps elements around when size is specified`() { + val queue = newQueueFile(size = 2) + + for (i in 0 until 3) { + queue.add(values[i]) + } + + // Confirm that first element now is values[1] in the file after wrapping + assertArrayEquals(queue.peek(), values[1]) + queue.remove() + + // Confirm that first element now is values[2] in the file after wrapping + assertArrayEquals(queue.peek(), values[2]) + } + + /** + * A RandomAccessFile that can break when you go to write the COMMITTED + * status. + */ + internal class BrokenRandomAccessFile(file: File?, mode: String?) : RandomAccessFile(file, mode) { + var rejectCommit: Boolean = true + override fun write(b: ByteArray, off: Int, len: Int) { + if (rejectCommit && filePointer == 0L) { + throw IOException("No commit for you!") + } + super.write(b, off, len) + } + } + + companion object { + private val logger: Logger = Logger.getLogger( + QueueFileTest::class.java.name + ) + + /** + * Takes up 33401 bytes in the queue (N*(N+1)/2+4*N). Picked 254 instead of 255 so that the number + * of bytes isn't a multiple of 4. + */ + private const val N = 254 + private val values = Array(N) { i -> + val value = ByteArray(i) + // Example: values[3] = { 3, 2, 1 } + for (ii in 0 until i) value[ii] = (i - ii).toByte() + value + } + } +} diff --git a/sentry/src/test/resources/corrupt_queue_file.txt b/sentry/src/test/resources/corrupt_queue_file.txt new file mode 100644 index 00000000000..2eca21fb255 Binary files /dev/null and b/sentry/src/test/resources/corrupt_queue_file.txt differ
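
Note: the snippet below is a minimal usage sketch of the vendored tape API introduced by this patch (QueueFile.Builder, ObjectQueue, ObjectQueue.Converter). The class name, file path, payload strings and the 100-element cap are illustrative assumptions for the example only and are not part of the change.

import io.sentry.cache.tape.ObjectQueue;
import io.sentry.cache.tape.QueueFile;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

// Hypothetical example class, not part of the patch.
public final class QueueFileUsageSketch {

  public static void main(String[] args) throws IOException {
    // Illustrative path; a real caller would place this under its own cache directory.
    final File backingFile = new File("queue-file");

    // size(100) turns the file into a ring buffer capped at 100 elements;
    // zero(true) overwrites removed entries with zero bytes on disk.
    final QueueFile queueFile =
        new QueueFile.Builder(backingFile).zero(true).size(100).build();

    // ObjectQueue layers (de)serialization on top of the raw byte queue via a Converter.
    final ObjectQueue<String> queue =
        ObjectQueue.create(
            queueFile,
            new ObjectQueue.Converter<String>() {
              @Override
              public String from(final byte[] bytes) throws IOException {
                return new String(bytes, StandardCharsets.UTF_8);
              }

              @Override
              public void toStream(final String value, final OutputStream os) throws IOException {
                os.write(value.getBytes(StandardCharsets.UTF_8));
              }
            });

    queue.add("entry-1");
    queue.add("entry-2");

    // Eldest element first; once the size limit is reached, add() evicts the eldest entry.
    System.out.println(queue.peek() + ", size=" + queue.size());

    queue.remove(); // removes (and, with zero=true, erases) the eldest element
    queueFile.close();
  }
}

Design note, as reflected by the tests above: the size(...) limit is what makes QueueFile wrap around (isAtFullCapacity() triggers a remove() before each add()), and corrupt or truncated files are reset rather than thrown on, accepting data loss instead of crashing the caller.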