diff --git a/sdk/cosmos/azure-cosmos-dotnet-benchmark/src/test/java/BasicOperationTest.java b/sdk/cosmos/azure-cosmos-dotnet-benchmark/src/test/java/BasicOperationTest.java index d52e57ed075c1..946be9691d8c0 100644 --- a/sdk/cosmos/azure-cosmos-dotnet-benchmark/src/test/java/BasicOperationTest.java +++ b/sdk/cosmos/azure-cosmos-dotnet-benchmark/src/test/java/BasicOperationTest.java @@ -34,7 +34,7 @@ public class BasicOperationTest { private CosmosContainer createdTestContainer; private CosmosContainer createdResultsContainer; - @BeforeClass(groups = {"emulator"}, timeOut = TIMEOUT) + @BeforeClass(groups = {"simple"}, timeOut = TIMEOUT) public void before_BasicOperationTest() { assertThat(this.client).isNull(); CosmosClientBuilder clientBuilder = new CosmosClientBuilder() @@ -60,7 +60,7 @@ public void before_BasicOperationTest() { this.createdResultsContainer = this.createdDatabase.getContainer("results" + suffix); } - @AfterClass(groups = {"emulator"}, timeOut = TIMEOUT, alwaysRun = true) + @AfterClass(groups = {"simple"}, timeOut = TIMEOUT, alwaysRun = true) public void afterClass() { assertThat(this.client).isNotNull(); assertThat(this.createdDatabase).isNotNull(); diff --git a/sdk/cosmos/azure-cosmos-spark_3-1_2-12/CHANGELOG.md b/sdk/cosmos/azure-cosmos-spark_3-1_2-12/CHANGELOG.md index 22255de9047df..e2cdabcbcef42 100644 --- a/sdk/cosmos/azure-cosmos-spark_3-1_2-12/CHANGELOG.md +++ b/sdk/cosmos/azure-cosmos-spark_3-1_2-12/CHANGELOG.md @@ -3,6 +3,7 @@ ### 4.14.0-beta.1 (Unreleased) #### Features Added +* Added new config options for Change Feed Modes, Incremental as `LatestVersion` and Full Fidelity as `AllVersionsAndDeletes` changes - See [PR 30399](https://github.com/Azure/azure-sdk-for-java/pull/30399) * Added option to emit client-side metrics via micrometer.io MeterRegistry. - See [PR 30065](https://github.com/Azure/azure-sdk-for-java/pull/30065) #### Breaking Changes diff --git a/sdk/cosmos/azure-cosmos-spark_3-2_2-12/CHANGELOG.md b/sdk/cosmos/azure-cosmos-spark_3-2_2-12/CHANGELOG.md index a83efe573a8e4..d109b61ef26dc 100644 --- a/sdk/cosmos/azure-cosmos-spark_3-2_2-12/CHANGELOG.md +++ b/sdk/cosmos/azure-cosmos-spark_3-2_2-12/CHANGELOG.md @@ -3,6 +3,7 @@ ### 4.14.0-beta.1 (Unreleased) #### Features Added +* Added new config options for Change Feed Modes, Incremental as `LatestVersion` and Full Fidelity as `AllVersionsAndDeletes` changes - See [PR 30399](https://github.com/Azure/azure-sdk-for-java/pull/30399) * Added option to emit client-side metrics via micrometer.io MeterRegistry. - See [PR 30065](https://github.com/Azure/azure-sdk-for-java/pull/30065) #### Breaking Changes diff --git a/sdk/cosmos/azure-cosmos-spark_3_2-12/docs/configuration-reference.md b/sdk/cosmos/azure-cosmos-spark_3_2-12/docs/configuration-reference.md index f2b324e3dd849..d293501920af7 100644 --- a/sdk/cosmos/azure-cosmos-spark_3_2-12/docs/configuration-reference.md +++ b/sdk/cosmos/azure-cosmos-spark_3_2-12/docs/configuration-reference.md @@ -64,12 +64,12 @@ Used to influence the json serialization/deserialization behavior | `spark.cosmos.serialization.dateTimeConversionMode` | `Default` | The date/time conversion mode (`Default`, `AlwaysEpochMilliseconds`, `AlwaysEpochMillisecondsWithSystemDefaultTimezone`). With `Default` the standard Spark 3.* behavior is used (`java.sql.Date`/`java.time.LocalDate` are converted to EpochDay, `java.sql.Timestamp`/`java.time.Instant` are converted to MicrosecondsFromEpoch). 
With `AlwaysEpochMilliseconds` the same behavior the Cosmos DB connector for Spark 2.4 used is applied - `java.sql.Date`, `java.time.LocalDate`, `java.sql.Timestamp` and `java.time.Instant` are converted to MillisecondsFromEpoch. The behavior for `AlwaysEpochMillisecondsWithSystemDefaultTimezone` is identical with `AlwaysEpochMilliseconds` except that it will assume System default time zone / Spark session time zone (specified via `spark.sql.session.timezone`) instead of UTC when the date/time to be parsed has no explicit time zone.| #### Change feed (only for Spark-Streaming using `cosmos.oltp.changeFeed` data source, which is read-only) configuration -| Config Property Name | Default | Description | -| :--- | :---- | :--- | -| `spark.cosmos.changeFeed.startFrom` | `Beginning` | ChangeFeed Start from settings (`Now`, `Beginning` or a certain point in time (UTC) for example `2020-02-10T14:15:03`) - the default value is `Beginning`. If the write config contains a `checkpointLocation` and any checkpoints exist, the stream is always continued independent of the `spark.cosmos.changeFeed.startFrom` settings - you need to change `checkpointLocation` or delete checkpoints to restart the stream if that is the intention. | -| `spark.cosmos.changeFeed.mode` | `Incremental` | ChangeFeed mode (`Incremental` or `FullFidelity`) - NOTE: `FullFidelity` is in experimental state right now. It requires that the subscription/account has been enabled for the private preview and there are known breaking changes that will happen for `FullFidelity` (schema of the returned documents). It is recommended to only use `FullFidelity` for non-production scenarios at this point. | -| `spark.cosmos.changeFeed.itemCountPerTriggerHint` | None | Approximate maximum number of items read from change feed for each micro-batch/trigger | -| `spark.cosmos.changeFeed.batchCheckpointLocation` | None | Can be used to generate checkpoints when using change feed queries in batch mode - and proceeding on the next iteration where the previous left off. | +| Config Property Name | Default | Description | +| :--- |:----------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `spark.cosmos.changeFeed.startFrom` | `Beginning` | ChangeFeed Start from settings (`Now`, `Beginning` or a certain point in time (UTC) for example `2020-02-10T14:15:03`) - the default value is `Beginning`. If the write config contains a `checkpointLocation` and any checkpoints exist, the stream is always continued independent of the `spark.cosmos.changeFeed.startFrom` settings - you need to change `checkpointLocation` or delete checkpoints to restart the stream if that is the intention. | +| `spark.cosmos.changeFeed.mode` | `Incremental/LatestVersion` | ChangeFeed mode (`Incremental/LatestVersion` or `FullFidelity/AllVersionsAndDeletes`) - NOTE: `FullFidelity/AllVersionsAndDeletes` is in experimental state right now. It requires that the subscription/account has been enabled for the private preview and there are known breaking changes that will happen for `FullFidelity/AllVersionsAndDeletes` (schema of the returned documents). 
It is recommended to only use `FullFidelity/AllVersionsAndDeletes` for non-production scenarios at this point. | +| `spark.cosmos.changeFeed.itemCountPerTriggerHint` | None | Approximate maximum number of items read from change feed for each micro-batch/trigger | +| `spark.cosmos.changeFeed.batchCheckpointLocation` | None | Can be used to generate checkpoints when using change feed queries in batch mode - and proceeding on the next iteration where the previous left off. | #### Json conversion configuration | Config Property Name | Default | Description | diff --git a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/ChangeFeedPartitionReader.scala b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/ChangeFeedPartitionReader.scala index 592560612d354..a7d2c3f916792 100644 --- a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/ChangeFeedPartitionReader.scala +++ b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/ChangeFeedPartitionReader.scala @@ -81,9 +81,9 @@ private case class ChangeFeedPartitionReader var factoryMethod: java.util.function.Function[JsonNode, _] = (_: JsonNode) => {} cosmosChangeFeedConfig.changeFeedMode match { - case ChangeFeedModes.Incremental => + case ChangeFeedModes.Incremental | ChangeFeedModes.LatestVersion => factoryMethod = (jsonNode: JsonNode) => changeFeedItemFactoryMethod(jsonNode) - case ChangeFeedModes.FullFidelity => + case ChangeFeedModes.FullFidelity | ChangeFeedModes.AllVersionsAndDeletes => factoryMethod = (jsonNode: JsonNode) => changeFeedItemFactoryMethodV1(jsonNode) } diff --git a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/ChangeFeedTable.scala b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/ChangeFeedTable.scala index 25e2a7342b8dc..abeeb86d1204d 100644 --- a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/ChangeFeedTable.scala +++ b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/ChangeFeedTable.scala @@ -119,9 +119,9 @@ private class ChangeFeedTable(val session: SparkSession, userConfig: Map[String, String]): StructType = { val defaultSchema: StructType = changeFeedConfig.changeFeedMode match { - case ChangeFeedModes.Incremental => + case ChangeFeedModes.Incremental | ChangeFeedModes.LatestVersion => ChangeFeedTable.defaultIncrementalChangeFeedSchemaForInferenceDisabled - case ChangeFeedModes.FullFidelity => + case ChangeFeedModes.FullFidelity | ChangeFeedModes.AllVersionsAndDeletes => ChangeFeedTable.defaultFullFidelityChangeFeedSchemaForInferenceDisabled } diff --git a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/CosmosConfig.scala b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/CosmosConfig.scala index 0162986cfe1e0..3fe8caecf5f9f 100644 --- a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/CosmosConfig.scala +++ b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/CosmosConfig.scala @@ -1078,6 +1078,8 @@ private object ChangeFeedModes extends Enumeration { val Incremental: ChangeFeedModes.Value = Value("Incremental") val FullFidelity: ChangeFeedModes.Value = Value("FullFidelity") + val LatestVersion: ChangeFeedModes.Value = Value("LatestVersion") + val AllVersionsAndDeletes: ChangeFeedModes.Value = Value("AllVersionsAndDeletes") } private object ChangeFeedStartFromModes extends Enumeration { @@ -1108,8 +1110,8 @@ private 
case class CosmosChangeFeedConfig } this.changeFeedMode match { - case ChangeFeedModes.Incremental => options - case ChangeFeedModes.FullFidelity => options.fullFidelity() + case ChangeFeedModes.Incremental | ChangeFeedModes.LatestVersion => options + case ChangeFeedModes.FullFidelity | ChangeFeedModes.AllVersionsAndDeletes => options.allVersionsAndDeletes() } } @@ -1148,7 +1150,7 @@ private object CosmosChangeFeedConfig { mandatory = false, defaultValue = Some(ChangeFeedModes.Incremental), parseFromStringFunction = changeFeedModeString => CosmosConfigEntry.parseEnumeration(changeFeedModeString, ChangeFeedModes), - helpMessage = "ChangeFeed mode (Incremental or FullFidelity)") + helpMessage = "ChangeFeed mode (Incremental/LatestVersion or FullFidelity/AllVersionsAndDeletes)") private val maxItemCountPerTriggerHint = CosmosConfigEntry[Long]( key = CosmosConfigNames.ChangeFeedItemCountPerTriggerHint, diff --git a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/CosmosConfigSpec.scala b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/CosmosConfigSpec.scala index 2b7e67cd393d9..56c1de0dfda6f 100644 --- a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/CosmosConfigSpec.scala +++ b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/CosmosConfigSpec.scala @@ -538,7 +538,22 @@ class CosmosConfigSpec extends UnitSpec { config.maxItemCountPerTrigger.get shouldEqual 54 } - it should "parse change feed config with PIT start mode" in { + it should "parse change feed config for all versions and deletes with incorrect casing" in { + val changeFeedConfig = Map( + "spark.cosmos.changeFeed.mode" -> "AllVersionsANDDELETES", + "spark.cosmos.changeFeed.STARTfrom" -> "NOW", + "spark.cosmos.changeFeed.itemCountPerTriggerHint" -> "54" + ) + + val config = CosmosChangeFeedConfig.parseCosmosChangeFeedConfig(changeFeedConfig) + + config.changeFeedMode shouldEqual ChangeFeedModes.AllVersionsAndDeletes + config.startFrom shouldEqual ChangeFeedStartFromModes.Now + config.startFromPointInTime shouldEqual None + config.maxItemCountPerTrigger.get shouldEqual 54 + } + + it should "parse change feed config (incremental) with PIT start mode" in { val changeFeedConfig = Map( "spark.cosmos.changeFeed.mode" -> "incremental", "spark.cosmos.changeFeed.STARTfrom" -> "2019-12-31T10:45:10Z", @@ -556,6 +571,24 @@ class CosmosConfigSpec extends UnitSpec { config.maxItemCountPerTrigger.get shouldEqual 54 } + it should "parse change feed config (latestversion) with PIT start mode" in { + val changeFeedConfig = Map( + "spark.cosmos.changeFeed.mode" -> "latestversion", + "spark.cosmos.changeFeed.STARTfrom" -> "2019-12-31T10:45:10Z", + "spark.cosmos.changeFeed.itemCountPerTriggerHint" -> "54" + ) + + val config = CosmosChangeFeedConfig.parseCosmosChangeFeedConfig(changeFeedConfig) + + config.changeFeedMode shouldEqual ChangeFeedModes.LatestVersion + config.startFrom shouldEqual ChangeFeedStartFromModes.PointInTime + Instant.from(config.startFromPointInTime.get) shouldEqual + new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssXXX") + .parse("2019-12-31T10:45:10Z") + .toInstant + config.maxItemCountPerTrigger.get shouldEqual 54 + } + it should "complain when parsing invalid change feed mode" in { val changeFeedConfig = Map( "spark.cosmos.changeFeed.mode" -> "Whatever", @@ -569,7 +602,7 @@ class CosmosConfigSpec extends UnitSpec { } catch { case e: Exception => e.getMessage shouldEqual "invalid configuration for 
spark.cosmos.changeFeed.mode:Whatever. Config description: " + - "ChangeFeed mode (Incremental or FullFidelity)" + "ChangeFeed mode (Incremental/LatestVersion or FullFidelity/AllVersionsAndDeletes)" } } diff --git a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/PartitionMetadataSpec.scala b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/PartitionMetadataSpec.scala index 9f3b4b4e9230e..1dc7a8b853468 100644 --- a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/PartitionMetadataSpec.scala +++ b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/PartitionMetadataSpec.scala @@ -42,7 +42,7 @@ class PartitionMetadataSpec extends UnitSpec { key shouldEqual s"$databaseName/$collectionName/${normalizedRange.min}-${normalizedRange.max}" } - it should "create instance with valid parameters via apply" in { + it should "create instance with valid parameters via apply in incremental mode" in { val clientConfig = CosmosClientConfiguration( UUID.randomUUID().toString, @@ -92,7 +92,79 @@ class PartitionMetadataSpec extends UnitSpec { docCount, docSizeInKB, firstLsn, - createChangeFeedState(latestLsn)) + createChangeFeedState(latestLsn, "INCREMENTAL")) + + viaCtor.cosmosClientConfig should be theSameInstanceAs viaApply.cosmosClientConfig + viaCtor.cosmosClientConfig should be theSameInstanceAs clientConfig + viaCtor.cosmosContainerConfig should be theSameInstanceAs viaApply.cosmosContainerConfig + viaCtor.cosmosContainerConfig should be theSameInstanceAs containerConfig + viaCtor.feedRange shouldEqual viaApply.feedRange + viaCtor.feedRange shouldEqual normalizedRange + viaCtor.documentCount shouldEqual viaApply.documentCount + viaCtor.documentCount shouldEqual docCount + viaCtor.totalDocumentSizeInKB shouldEqual viaApply.totalDocumentSizeInKB + viaCtor.totalDocumentSizeInKB shouldEqual docSizeInKB + viaCtor.latestLsn shouldEqual viaApply.latestLsn + viaCtor.latestLsn shouldEqual latestLsn + viaCtor.firstLsn shouldEqual viaApply.firstLsn + viaCtor.firstLsn.get shouldEqual latestLsn - 10 + viaCtor.lastUpdated.get should be >= nowEpochMs + viaCtor.lastUpdated.get shouldEqual viaCtor.lastRetrieved.get + viaApply.lastUpdated.get should be >= nowEpochMs + viaApply.lastUpdated.get shouldEqual viaApply.lastRetrieved.get + } + + it should "create instance with valid parameters via apply in full fidelity mode" in { + + val clientConfig = CosmosClientConfiguration( + UUID.randomUUID().toString, + UUID.randomUUID().toString, + None, + UUID.randomUUID().toString, + useGatewayMode = false, + useEventualConsistency = true, + enableClientTelemetry = false, + disableTcpConnectionEndpointRediscovery = false, + clientTelemetryEndpoint = None, + preferredRegionsList = Option.empty) + + val containerConfig = CosmosContainerConfig(UUID.randomUUID().toString, UUID.randomUUID().toString) + val latestLsn = rnd.nextInt(10000000) + 1 + val firstLsn = Some(latestLsn - 10L) + + val normalizedRange = NormalizedRange(UUID.randomUUID().toString, UUID.randomUUID().toString) + val docCount = rnd.nextInt() + val docSizeInKB = rnd.nextInt() + + val nowEpochMs = Instant.now.toEpochMilli + val createdAt = new AtomicLong(nowEpochMs) + val lastRetrievedAt = new AtomicLong(nowEpochMs) + + val viaCtor = PartitionMetadata( + Map[String, String](), + clientConfig, + None, + containerConfig, + normalizedRange, + docCount, + docSizeInKB, + firstLsn, + latestLsn, + 0, + None, + createdAt, + lastRetrievedAt) + + val viaApply = PartitionMetadata( + 
Map[String, String](), + clientConfig, + None, + containerConfig, + normalizedRange, + docCount, + docSizeInKB, + firstLsn, + createChangeFeedState(latestLsn, "FULL_FIDELITY")) viaCtor.cosmosClientConfig should be theSameInstanceAs viaApply.cosmosClientConfig viaCtor.cosmosClientConfig should be theSameInstanceAs clientConfig @@ -581,16 +653,17 @@ class PartitionMetadataSpec extends UnitSpec { //scalastyle:on null //scalastyle:on multiple.string.literals - private[this] def createChangeFeedState(latestLsn: Long) = { + private[this] def createChangeFeedState(latestLsn: Long, mode: String) = { val collectionRid = UUID.randomUUID().toString val json = String.format( "{\"V\":1," + "\"Rid\":\"%s\"," + - "\"Mode\":\"INCREMENTAL\"," + + "\"Mode\":\"%s\"," + "\"StartFrom\":{\"Type\":\"BEGINNING\"}," + "\"Continuation\":%s}", collectionRid, + mode, String.format( "{\"V\":1," + "\"Rid\":\"%s\"," + diff --git a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/SparkE2EChangeFeedITest.scala b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/SparkE2EChangeFeedITest.scala index b8d9aad4ddcba..b48199a9cd9ff 100644 --- a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/SparkE2EChangeFeedITest.scala +++ b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/SparkE2EChangeFeedITest.scala @@ -83,6 +83,52 @@ class SparkE2EChangeFeedITest ChangeFeedTable.defaultIncrementalChangeFeedSchemaForInferenceDisabled) shouldEqual true } + "spark change feed query (LatestVersion)" can "use default schema" in { + val cosmosEndpoint = TestConfigurations.HOST + val cosmosMasterKey = TestConfigurations.MASTER_KEY + + val container = cosmosClient.getDatabase(cosmosDatabase).getContainer(cosmosContainer) + for (state <- Array(true, false)) { + val objectNode = Utils.getSimpleObjectMapper.createObjectNode() + objectNode.put("name", "Shrodigner's cat") + objectNode.put("type", "cat") + objectNode.put("age", 20) + objectNode.put("isAlive", state) + objectNode.put("id", UUID.randomUUID().toString) + container.createItem(objectNode).block() + } + val cfg = Map( + "spark.cosmos.accountEndpoint" -> cosmosEndpoint, + "spark.cosmos.accountKey" -> cosmosMasterKey, + "spark.cosmos.database" -> cosmosDatabase, + "spark.cosmos.container" -> cosmosContainer, + "spark.cosmos.read.maxItemCount" -> "2", + "spark.cosmos.read.inferSchema.enabled" -> "false" + ) + + val df = spark.read.format("cosmos.oltp.changeFeed").options(cfg).load() + val rowsArray = df.collect() + rowsArray should have size 2 + df.schema.equals( + ChangeFeedTable.defaultIncrementalChangeFeedSchemaForInferenceDisabled) shouldEqual true + + val cfgExplicit = Map( + "spark.cosmos.accountEndpoint" -> cosmosEndpoint, + "spark.cosmos.accountKey" -> cosmosMasterKey, + "spark.cosmos.database" -> cosmosDatabase, + "spark.cosmos.container" -> cosmosContainer, + "spark.cosmos.read.inferSchema.enabled" -> "false", + "spark.cosmos.read.maxItemCount" -> "1", + "spark.cosmos.changeFeed.mode" -> "LatestVersion" + ) + + val dfExplicit = spark.read.format("cosmos.oltp.changeFeed").options(cfgExplicit).load() + val rowsArrayExplicit = dfExplicit.collect() + rowsArrayExplicit should have size 2 + dfExplicit.schema.equals( + ChangeFeedTable.defaultIncrementalChangeFeedSchemaForInferenceDisabled) shouldEqual true + } + "spark change feed query (incremental)" can "use user provided schema" in { val cosmosEndpoint = TestConfigurations.HOST val cosmosMasterKey = TestConfigurations.MASTER_KEY @@ 
-152,6 +198,38 @@ class SparkE2EChangeFeedITest ChangeFeedTable.defaultFullFidelityChangeFeedSchemaForInferenceDisabled) shouldEqual true } + "spark change feed query (all versions and deletes)" can "use default schema" in { + val cosmosEndpoint = TestConfigurations.HOST + val cosmosMasterKey = TestConfigurations.MASTER_KEY + + val container = cosmosClient.getDatabase(cosmosDatabase).getContainer(cosmosContainer) + for (state <- Array(true, false)) { + val objectNode = Utils.getSimpleObjectMapper.createObjectNode() + objectNode.put("name", "Shrodigner's cat") + objectNode.put("type", "cat") + objectNode.put("age", 20) + objectNode.put("isAlive", state) + objectNode.put("id", UUID.randomUUID().toString) + container.createItem(objectNode).block() + } + val cfg = Map( + "spark.cosmos.accountEndpoint" -> cosmosEndpoint, + "spark.cosmos.accountKey" -> cosmosMasterKey, + "spark.cosmos.database" -> cosmosDatabase, + "spark.cosmos.container" -> cosmosContainer, + "spark.cosmos.read.inferSchema.enabled" -> "false", + "spark.cosmos.changeFeed.mode" -> "AllVersionsAndDeletes", + "spark.cosmos.read.maxItemCount" -> "1", + "spark.cosmos.changeFeed.startFrom" -> "NOW" + ) + + val df = spark.read.format("cosmos.oltp.changeFeed").options(cfg).load() + val rowsArray = df.collect() + rowsArray should have size 0 + df.schema.equals( + ChangeFeedTable.defaultFullFidelityChangeFeedSchemaForInferenceDisabled) shouldEqual true + } + "spark change feed micro batch (incremental)" can "use default schema" in { val cosmosEndpoint = TestConfigurations.HOST val cosmosMasterKey = TestConfigurations.MASTER_KEY diff --git a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/TestUtils.scala b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/TestUtils.scala index c60fe74a6b8f1..2db9191b79366 100644 --- a/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/TestUtils.scala +++ b/sdk/cosmos/azure-cosmos-spark_3_2-12/src/test/scala/com/azure/cosmos/spark/TestUtils.scala @@ -298,7 +298,7 @@ trait CosmosContainerWithRetention extends CosmosContainer { val properties: CosmosContainerProperties = new CosmosContainerProperties(cosmosContainer, partitionKeyPath) properties.setChangeFeedPolicy( - ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(10))) + ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(10))) val throughputProperties = ThroughputProperties.createManualThroughput(Defaults.DefaultContainerThroughput) diff --git a/sdk/cosmos/azure-cosmos/CHANGELOG.md b/sdk/cosmos/azure-cosmos/CHANGELOG.md index 3a2751c55de67..49bc2f1e81684 100644 --- a/sdk/cosmos/azure-cosmos/CHANGELOG.md +++ b/sdk/cosmos/azure-cosmos/CHANGELOG.md @@ -3,6 +3,7 @@ ### 4.37.0-beta.1 (Unreleased) #### Features Added +* Added new preview APIs to `ChangeFeedProcessor` for handling all versions and deletes changes - See [PR 30399](https://github.com/Azure/azure-sdk-for-java/pull/30399) * Added option to emit client-side metrics via micrometer.io MeterRegistry. - See [PR 30065](https://github.com/Azure/azure-sdk-for-java/pull/30065) #### Breaking Changes diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/ChangeFeedProcessor.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/ChangeFeedProcessor.java index bc456e675578f..28f6295bf13d1 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/ChangeFeedProcessor.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/ChangeFeedProcessor.java @@ -25,6 +25,9 @@ *
  • The delegate: the delegate is the code that defines what you, the developer, want to do with each batch of * changes that the change feed processor reads.
  • * + * + * Below is an example of building ChangeFeedProcessor for LatestVersion mode. + * * *
      * ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder()
    @@ -39,6 +42,23 @@
      *     .buildChangeFeedProcessor();
      * 
    * + * + * Below is an example of building ChangeFeedProcessor for AllVersionsAndDeletes mode. + * + * + *
    + * ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder()
    + *     .hostName(hostName)
    + *     .feedContainer(feedContainer)
    + *     .leaseContainer(leaseContainer)
    + *     .handleAllVersionsAndDeletesChanges(docs -> {
    + *         for (ChangeFeedProcessorItem item : docs) {
    + *             // Implementation for handling and processing of each ChangeFeedProcessorItem item goes here
    + *         }
    + *     })
    + *     .buildChangeFeedProcessor();
    + * 
    + * */ public interface ChangeFeedProcessor { diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/ChangeFeedProcessorBuilder.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/ChangeFeedProcessorBuilder.java index 201329c020c2c..e6a95747cf6b7 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/ChangeFeedProcessorBuilder.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/ChangeFeedProcessorBuilder.java @@ -3,7 +3,10 @@ package com.azure.cosmos; import com.azure.cosmos.implementation.changefeed.incremental.ChangeFeedProcessorBuilderImpl; +import com.azure.cosmos.implementation.changefeed.common.ChangeFeedMode; +import com.azure.cosmos.models.ChangeFeedProcessorItem; import com.azure.cosmos.models.ChangeFeedProcessorOptions; +import com.azure.cosmos.util.Beta; import com.fasterxml.jackson.databind.JsonNode; import java.util.List; @@ -12,6 +15,8 @@ /** * Helper class to build a {@link ChangeFeedProcessor} instance. * + * Below is an example of building ChangeFeedProcessor for LatestVersion mode. + * * *
      * ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder()
    @@ -26,6 +31,23 @@
      *     .buildChangeFeedProcessor();
      * 
    * + * + * Below is an example of building ChangeFeedProcessor for AllVersionsAndDeletes mode. + * + * + *
    + * ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder()
    + *     .hostName(hostName)
    + *     .feedContainer(feedContainer)
    + *     .leaseContainer(leaseContainer)
    + *     .handleAllVersionsAndDeletesChanges(docs -> {
    + *         for (ChangeFeedProcessorItem item : docs) {
    + *             // Implementation for handling and processing of each ChangeFeedProcessorItem item goes here
    + *         }
    + *     })
    + *     .buildChangeFeedProcessor();
    + * 
+ * */ public class ChangeFeedProcessorBuilder { private String hostName; @@ -33,6 +55,8 @@ public class ChangeFeedProcessorBuilder { private CosmosAsyncContainer leaseContainer; private ChangeFeedProcessorOptions changeFeedProcessorOptions; private Consumer<List<JsonNode>> partitionKeyBasedLeaseConsumer; + private Consumer<List<ChangeFeedProcessorItem>> epkRangeBasedLeaseConsumer; + private ChangeFeedMode changeFeedMode = ChangeFeedMode.INCREMENTAL; /** * Instantiates a new Cosmos a new ChangeFeedProcessor builder. @@ -77,7 +101,7 @@ public ChangeFeedProcessorBuilder leaseContainer(CosmosAsyncContainer leaseConta } /** - * Sets a consumer function which will be called to process changes. + * Sets a consumer function which will be called to process changes for LatestVersion change feed mode. * * *
    @@ -94,7 +118,30 @@ public ChangeFeedProcessorBuilder leaseContainer(CosmosAsyncContainer leaseConta
          */
     public ChangeFeedProcessorBuilder handleChanges(Consumer<List<JsonNode>> consumer) {
             this.partitionKeyBasedLeaseConsumer = consumer;
    +        this.changeFeedMode = ChangeFeedMode.INCREMENTAL;
    +        return this;
    +    }
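    // Editor's illustrative sketch (not part of this patch): end-to-end wiring of the LatestVersion
    // consumer set via handleChanges(...) above. Assumes `feedContainer` and `leaseContainer` are
    // pre-existing CosmosAsyncContainer instances and `hostName` is any unique host identifier.
    //
    //   ChangeFeedProcessor processor = new ChangeFeedProcessorBuilder()
    //       .hostName(hostName)
    //       .feedContainer(feedContainer)
    //       .leaseContainer(leaseContainer)
    //       .handleChanges((List<JsonNode> docs) -> docs.forEach(System.out::println))
    //       .buildChangeFeedProcessor();
    //
    //   processor.start().subscribe();  // begin pulling LatestVersion changes
    //   // ... later, on shutdown ...
    //   processor.stop().subscribe();   // release leases and stop processing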
     
    +    /**
    +     * Sets a consumer function which will be called to process changes for AllVersionsAndDeletes change feed mode.
    +     *
    +     * 
    +     * 
    +     * .handleAllVersionsAndDeletesChanges(docs -> {
    +     *     for (ChangeFeedProcessorItem item : docs) {
    +     *         // Implementation for handling and processing of each ChangeFeedProcessorItem item goes here
    +     *     }
    +     * })
    +     * 
    + * + * + * @param consumer the {@link Consumer} to call for handling the feeds. + * @return current Builder. + */ + @Beta(value = Beta.SinceVersion.V4_37_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) + public ChangeFeedProcessorBuilder handleAllVersionsAndDeletesChanges(Consumer> consumer) { + this.epkRangeBasedLeaseConsumer = consumer; + this.changeFeedMode = ChangeFeedMode.FULL_FIDELITY; return this; } @@ -127,17 +174,30 @@ public ChangeFeedProcessorBuilder options(ChangeFeedProcessorOptions changeFeedP public ChangeFeedProcessor buildChangeFeedProcessor() { validateChangeFeedProcessorBuilder(); - ChangeFeedProcessorBuilderImpl builder = new ChangeFeedProcessorBuilderImpl() - .hostName(this.hostName) - .feedContainer(this.feedContainer) - .leaseContainer(this.leaseContainer) - .handleChanges(this.partitionKeyBasedLeaseConsumer); - - if (this.changeFeedProcessorOptions != null) { - builder.options(this.changeFeedProcessorOptions); + if (ChangeFeedMode.INCREMENTAL.equals(changeFeedMode)) { + ChangeFeedProcessorBuilderImpl builder = new ChangeFeedProcessorBuilderImpl() + .hostName(this.hostName) + .feedContainer(this.feedContainer) + .leaseContainer(this.leaseContainer) + .handleChanges(this.partitionKeyBasedLeaseConsumer); + + if (this.changeFeedProcessorOptions != null) { + builder.options(this.changeFeedProcessorOptions); + } + + return builder.build(); + } else { + com.azure.cosmos.implementation.changefeed.fullfidelity.ChangeFeedProcessorBuilderImpl builder = + new com.azure.cosmos.implementation.changefeed.fullfidelity.ChangeFeedProcessorBuilderImpl() + .hostName(this.hostName) + .feedContainer(this.feedContainer) + .leaseContainer(this.leaseContainer) + .handleChanges(this.epkRangeBasedLeaseConsumer); + if (this.changeFeedProcessorOptions != null) { + builder.options(this.changeFeedProcessorOptions); + } + return builder.build(); } - - return builder.build(); } private void validateChangeFeedProcessorBuilder() { @@ -150,6 +210,10 @@ private void validateChangeFeedProcessorBuilder() { if (leaseContainer == null) { throw new IllegalArgumentException("leaseContainer cannot be null"); } + if ((partitionKeyBasedLeaseConsumer == null && epkRangeBasedLeaseConsumer == null) + || (partitionKeyBasedLeaseConsumer != null && epkRangeBasedLeaseConsumer != null)) { + throw new IllegalArgumentException("expecting either LatestVersion or AllVersionsAndDeletes consumer for handling change feed processor changes"); + } validateChangeFeedProcessorOptions(); } @@ -162,5 +226,15 @@ private void validateChangeFeedProcessorOptions() { // force a lot of resets and lead to a poor overall performance of ChangeFeedProcessor. 
throw new IllegalArgumentException("changeFeedProcessorOptions: expecting leaseRenewInterval less than leaseExpirationInterval"); } + // Some extra checks for all versions and deletes mode + if (ChangeFeedMode.FULL_FIDELITY.equals(changeFeedMode)) { + if (this.changeFeedProcessorOptions.getStartTime() != null) { + throw new IllegalStateException("changeFeedProcessorOptions: AllVersionsAndDeletes change feed mode is not supported for startTime option."); + } + + if (this.changeFeedProcessorOptions.isStartFromBeginning()) { + throw new IllegalStateException("changeFeedProcessorOptions: AllVersionsAndDeletes change feed mode is not supported for startFromBeginning option."); + } + } } } diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/DocumentCollection.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/DocumentCollection.java index 2dfc84903a66e..2cf36cf94ca3b 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/DocumentCollection.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/DocumentCollection.java @@ -274,7 +274,7 @@ public ChangeFeedPolicy getChangeFeedPolicy() { ChangeFeedPolicy policy = super.getObject(Constants.Properties.CHANGE_FEED_POLICY, ChangeFeedPolicy.class); if (policy == null) { - return ChangeFeedPolicy.createIncrementalPolicy(); + return ChangeFeedPolicy.createLatestVersionPolicy(); } return policy; diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/HttpConstants.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/HttpConstants.java index 9b64c17b3b8c4..49a60b6378d12 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/HttpConstants.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/HttpConstants.java @@ -279,7 +279,7 @@ public static class HttpHeaders { public static class A_IMHeaderValues { public static final String INCREMENTAL_FEED = "Incremental Feed"; - public static final String FullFidelityFeed = "Full-Fidelity Feed"; + public static final String FULL_FIDELITY_FEED = "Full-Fidelity Feed"; } public static class Versions { diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/Utils.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/Utils.java index bf40752bbf909..cfa1087d52d39 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/Utils.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/Utils.java @@ -16,6 +16,7 @@ import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import com.fasterxml.jackson.module.afterburner.AfterburnerModule; import io.netty.buffer.ByteBuf; import org.slf4j.Logger; @@ -101,6 +102,8 @@ private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplica objectMapper.registerModule(new AfterburnerModule()); } + objectMapper.registerModule(new JavaTimeModule()); + return objectMapper; } diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedMode.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedMode.java index cd5daec31f3af..7fd0b9a7d7d29 100644 --- 
a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedMode.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedMode.java @@ -5,15 +5,17 @@ /** * Change feed mode + * NOTE: We cannot rename these enums. + * They are part of continuation token for Change feed pull model and are already in use for spark customers. */ public enum ChangeFeedMode { /** - * Incremental mode is the latest version change only. This mode only provides changes for Create, Replace and Upsert operations. + * Represents the latest version/Incremental change only. This mode only provides changes for Create, Replace and Upsert operations. * Only the most recent change for a given item is included in the change log. Intermediate changes may not be available. */ INCREMENTAL, /** - * Full Fidelity model is all version changes. This mode provides changes for Create, Replace, Upsert and Delete operations. + * Represents all version changes including deletes/Full Fidelity. This mode provides changes for Create, Replace, Upsert and Delete operations. * All changes for a given item are included in the changes log. */ FULL_FIDELITY diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedStateV1.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedStateV1.java index 261264800d749..e48d58dad5e01 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedStateV1.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedStateV1.java @@ -169,7 +169,7 @@ public void populateRequest(RxDocumentServiceRequest request, int maxItemCount) case FULL_FIDELITY: request.getHeaders().put( HttpConstants.HttpHeaders.A_IM, - HttpConstants.A_IMHeaderValues.FullFidelityFeed); + HttpConstants.A_IMHeaderValues.FULL_FIDELITY_FEED); // This is the new wire format, which only gets passed for Full Fidelity Change Feed request.getHeaders().put( HttpConstants.HttpHeaders.CHANGE_FEED_WIRE_FORMAT_VERSION, diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/ChangeFeedProcessorBuilderImpl.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/ChangeFeedProcessorBuilderImpl.java index 09f336d00472a..370f0fb8b55d3 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/ChangeFeedProcessorBuilderImpl.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/ChangeFeedProcessorBuilderImpl.java @@ -26,7 +26,7 @@ import com.azure.cosmos.implementation.changefeed.common.EqualPartitionsBalancingStrategy; import com.azure.cosmos.implementation.changefeed.common.PartitionedByIdCollectionRequestOptionsFactory; import com.azure.cosmos.implementation.changefeed.common.TraceHealthMonitor; -import com.azure.cosmos.implementation.changefeed.common.ChangeFeedProcessorItem; +import com.azure.cosmos.models.ChangeFeedProcessorItem; import com.azure.cosmos.models.ChangeFeedProcessorOptions; import com.azure.cosmos.models.ChangeFeedProcessorState; import com.azure.cosmos.models.CosmosChangeFeedRequestOptions; @@ -151,7 +151,7 @@ public Mono> getCurrentState() { CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions .createForProcessingFromNow(lease.getFeedRange()) 
.setMaxItemCount(1) - .fullFidelity(); + .allVersionsAndDeletes(); return this.feedContextClient .createDocumentChangeFeedQuery(this.feedContextClient.getContainerClient(), options, ChangeFeedProcessorItem.class) diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionProcessorFactoryImpl.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionProcessorFactoryImpl.java index f8387861a8203..b95a34a3077a7 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionProcessorFactoryImpl.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionProcessorFactoryImpl.java @@ -18,7 +18,7 @@ import com.azure.cosmos.implementation.changefeed.common.ChangeFeedStateV1; import com.azure.cosmos.implementation.feedranges.FeedRangeInternal; import com.azure.cosmos.implementation.changefeed.common.ChangeFeedMode; -import com.azure.cosmos.implementation.changefeed.common.ChangeFeedProcessorItem; +import com.azure.cosmos.models.ChangeFeedProcessorItem; import com.azure.cosmos.models.ChangeFeedProcessorOptions; /** diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionProcessorImpl.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionProcessorImpl.java index 510f82ecce3dc..d4da69c799165 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionProcessorImpl.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionProcessorImpl.java @@ -20,7 +20,7 @@ import com.azure.cosmos.implementation.changefeed.exceptions.PartitionNotFoundException; import com.azure.cosmos.implementation.changefeed.exceptions.PartitionSplitException; import com.azure.cosmos.implementation.changefeed.exceptions.TaskCancelledException; -import com.azure.cosmos.implementation.changefeed.common.ChangeFeedProcessorItem; +import com.azure.cosmos.models.ChangeFeedProcessorItem; import com.azure.cosmos.models.CosmosChangeFeedRequestOptions; import com.azure.cosmos.models.FeedResponse; import com.azure.cosmos.models.ModelBridgeInternal; @@ -67,7 +67,7 @@ public PartitionProcessorImpl(ChangeFeedObserver observ ChangeFeedState state = settings.getStartState(); this.options = ModelBridgeInternal.createChangeFeedRequestOptionsForChangeFeedState(state); - this.options.setMaxItemCount(settings.getMaxItemCount()).fullFidelity(); + this.options.setMaxItemCount(settings.getMaxItemCount()).allVersionsAndDeletes(); } @Override @@ -127,14 +127,14 @@ public Mono run(CancellationToken cancellationToken) { .doOnSuccess((Void) -> { this.options = CosmosChangeFeedRequestOptions - .createForProcessingFromContinuation(continuationToken).fullFidelity(); + .createForProcessingFromContinuation(continuationToken).allVersionsAndDeletes(); if (cancellationToken.isCancellationRequested()) throw new TaskCancelledException(); }); } this.options = CosmosChangeFeedRequestOptions - .createForProcessingFromContinuation(continuationToken).fullFidelity(); + .createForProcessingFromContinuation(continuationToken).allVersionsAndDeletes(); if (cancellationToken.isCancellationRequested()) { return Flux.error(new TaskCancelledException()); diff --git 
a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionSupervisorFactoryImpl.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionSupervisorFactoryImpl.java index faff7e40d8a7b..bd8bd802e40a8 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionSupervisorFactoryImpl.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionSupervisorFactoryImpl.java @@ -11,7 +11,7 @@ import com.azure.cosmos.implementation.changefeed.PartitionProcessorFactory; import com.azure.cosmos.implementation.changefeed.PartitionSupervisor; import com.azure.cosmos.implementation.changefeed.PartitionSupervisorFactory; -import com.azure.cosmos.implementation.changefeed.common.ChangeFeedProcessorItem; +import com.azure.cosmos.models.ChangeFeedProcessorItem; import com.azure.cosmos.models.ChangeFeedProcessorOptions; import reactor.core.scheduler.Scheduler; diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionSupervisorImpl.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionSupervisorImpl.java index 51cbc417f7c1d..1a24598138a69 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionSupervisorImpl.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/fullfidelity/PartitionSupervisorImpl.java @@ -17,7 +17,7 @@ import com.azure.cosmos.implementation.changefeed.exceptions.ObserverException; import com.azure.cosmos.implementation.changefeed.exceptions.PartitionSplitException; import com.azure.cosmos.implementation.changefeed.exceptions.TaskCancelledException; -import com.azure.cosmos.implementation.changefeed.common.ChangeFeedProcessorItem; +import com.azure.cosmos.models.ChangeFeedProcessorItem; import reactor.core.publisher.Mono; import reactor.core.scheduler.Scheduler; diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedMetaData.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ChangeFeedMetaData.java similarity index 72% rename from sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedMetaData.java rename to sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ChangeFeedMetaData.java index a707a38b63c5d..36f95c9fbe390 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedMetaData.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ChangeFeedMetaData.java @@ -1,17 +1,21 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-package com.azure.cosmos.implementation.changefeed.common; +package com.azure.cosmos.models; import com.azure.cosmos.implementation.Utils; +import com.azure.cosmos.util.Beta; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.core.JsonProcessingException; +import java.time.Instant; + /** * Change Feed response meta data */ +@Beta(value = Beta.SinceVersion.V4_37_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public final class ChangeFeedMetaData { @JsonProperty("crts") - private long conflictResolutionTimestamp; + private Instant conflictResolutionTimestamp; @JsonProperty("lsn") private long logSequenceNumber; @JsonProperty("operationType") @@ -26,7 +30,8 @@ public final class ChangeFeedMetaData { * * @return conflict resolution timestamp */ - public long getConflictResolutionTimestamp() { + @Beta(value = Beta.SinceVersion.V4_37_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) + public Instant getConflictResolutionTimestamp() { return conflictResolutionTimestamp; } @@ -35,6 +40,7 @@ public long getConflictResolutionTimestamp() { * * @return current logical sequence number */ + @Beta(value = Beta.SinceVersion.V4_37_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public long getLogSequenceNumber() { return logSequenceNumber; } @@ -44,6 +50,7 @@ public long getLogSequenceNumber() { * * @return change Feed operation type */ + @Beta(value = Beta.SinceVersion.V4_37_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public ChangeFeedOperationType getOperationType() { return operationType; } @@ -53,6 +60,7 @@ public ChangeFeedOperationType getOperationType() { * * @return previous logical sequence number */ + @Beta(value = Beta.SinceVersion.V4_37_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public long getPreviousLogSequenceNumber() { return previousLogSequenceNumber; } @@ -63,6 +71,7 @@ public long getPreviousLogSequenceNumber() { * * @return true if ttlExpiration caused the delete. */ + @Beta(value = Beta.SinceVersion.V4_37_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public boolean isTimeToLiveExpired() { return timeToLiveExpired; } diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedOperationType.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ChangeFeedOperationType.java similarity index 75% rename from sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedOperationType.java rename to sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ChangeFeedOperationType.java index db6ae7defa65a..59b051ed70cf7 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedOperationType.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ChangeFeedOperationType.java @@ -1,12 +1,14 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-package com.azure.cosmos.implementation.changefeed.common; +package com.azure.cosmos.models; +import com.azure.cosmos.util.Beta; import com.fasterxml.jackson.annotation.JsonProperty; /** * Change feed operation type */ +@Beta(value = Beta.SinceVersion.V4_37_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public enum ChangeFeedOperationType { /** * Represents Create operation diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ChangeFeedPolicy.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ChangeFeedPolicy.java index 839af8c5a7fe7..779aa07ca7b70 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ChangeFeedPolicy.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ChangeFeedPolicy.java @@ -14,20 +14,20 @@ * Represents the change feed policy configuration for the container in the Azure Cosmos DB service. * *

- * The example below creates a new container with a change feed policy for full fidelity change feed with a + * The example below creates a new container with a change feed policy for AllVersionsAndDeletes change feed with a * retention window of 8 minutes - so intermediary snapshots of changes as well as deleted documents would be - * available for processing for 8 minutes before they vanish. Processing the change feed with full fidelity mode will - * only be able within this retention window - if you attempt to process a change feed after more - * than the retention window (8 minutes in this sample) an error (Status Code 400) will be returned. It would - * still be possible to process changes using Incremental mode even when configuring a full fidelity change - * feed policy with retention window on the container and when using Incremental mode it doesn't matter whether - * your are out of the retention window or not. + * available for processing for 8 minutes before they vanish. + * Processing the change feed with AllVersionsAndDeletes mode will only be possible within this retention window - if you attempt to process a change feed after more + * than the retention window (8 minutes in this sample) an error (Status Code 400) will be returned. + * It would still be possible to process changes using LatestVersion mode even when configuring an AllVersionsAndDeletes change + * feed policy with retention window on the container and when using LatestVersion mode it doesn't matter whether + * you are out of the retention window or not. * *

    {@code
      *
      * CosmosContainerProperties containerProperties =
      *      new CosmosContainerProperties("ContainerName", "/somePartitionKeyProperty");
    - * containerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createFullFidelityPolicy(8));
+ * containerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(8)));
      *
      * CosmosAsyncDatabase database = client.createDatabase(databaseProperties).block().getDatabase();
      * CosmosAsyncContainer container = database.createContainer(containerProperties).block().getContainer();
    @@ -35,16 +35,16 @@
      * }
      * 
    *

    - * The example below creates a new container with a change feed policy for incremental change feed. Processing - * the change feed with full fidelity mode will not be possible for this container. It would still be possible to - * process changes using Incremental mode. The Incremental change feed policy is also the default that - * is used when not explicitly specifying a change feed policy. + * The example below creates a new container with a change feed policy for LatestVersion change feed. + * Processing the change feed with AllVersionsAndDeletes mode will not be possible for this container. + * It would still be possible to process changes using LatestVersion mode. + * The LatestVersion change feed policy is also the default that is used when not explicitly specifying a change feed policy. * *

    {@code
      *
      * CosmosContainerProperties containerProperties =
      *      new CosmosContainerProperties("ContainerName", "/somePartitionKeyProperty");
    - * containerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createIncrementalPolicy());
    + * containerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createLatestVersionPolicy());
      *
      * CosmosAsyncDatabase database = client.createDatabase(databaseProperties).block().getDatabase();
      * CosmosAsyncContainer container = database.createContainer(containerProperties).block().getContainer();
    @@ -59,17 +59,17 @@ public final class ChangeFeedPolicy {
         private final JsonSerializable jsonSerializable;
     
         /**
    -     * Creates a ChangeFeedPolicy with retention duration for full fidelity processing
    +     * Creates a ChangeFeedPolicy with retention duration for AllVersionsAndDeletes processing
          *
          * @param retentionDuration  - the retention duration (max granularity in minutes) in which it
    -     *                             will be possible to process change feed events with full fidelity
    -     *                             mode (meaning intermediary changes and deletes
    -     *                             will be exposed in change feed).
    +     *                             will be possible to process change feed events with AllVersionsAndDeletes mode.
          *
    -     * @return ChangeFeedPolicy for full fidelity change feed.
    +     * @return ChangeFeedPolicy for AllVersionsAndDeletes change feed.
    +     * @deprecated use {@link ChangeFeedPolicy#createAllVersionsAndDeletesPolicy(Duration)} instead.
          */
         @Beta(value = Beta.SinceVersion.V4_12_0,
             warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
    +    @Deprecated //since = "V4_37_0", forRemoval = true
         public static ChangeFeedPolicy createFullFidelityPolicy(Duration retentionDuration) {
     
             if (retentionDuration.isNegative() ||
    @@ -82,25 +82,71 @@ public static ChangeFeedPolicy createFullFidelityPolicy(Duration retentionDurati
             }
     
             ChangeFeedPolicy policy = new ChangeFeedPolicy();
    -        policy.setFullFidelityRetentionDurationInMinutes((int)(retentionDuration.getSeconds() / 60));
    +        policy.setRetentionDurationForAllVersionsAndDeletesPolicyInMinutes((int)retentionDuration.toMinutes());
             return policy;
         }
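    // Editor's note (not part of this patch): the deprecated factory above and the new
    // createAllVersionsAndDeletesPolicy(Duration) added below share the same implementation,
    // so migration is a pure rename:
    //
    //   ChangeFeedPolicy oldStyle = ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(8));
    //   ChangeFeedPolicy newStyle = ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(8));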
     
         /**
    -     * Creates a default ChangeFeedPolicy without retention duration specified. With the default/incremental
    +     * Creates a ChangeFeedPolicy with retention duration for AllVersionsAndDeletes processing
    +     *
    +     * @param retentionDuration  - the retention duration (max granularity in minutes) in which it
    +     *                             will be possible to process change feed events with AllVersionsAndDeletes mode.
    +     *
    +     * @return ChangeFeedPolicy for AllVersionsAndDeletes change feed.
    +     */
    +    @Beta(value = Beta.SinceVersion.V4_37_0,
    +        warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
    +    public static ChangeFeedPolicy createAllVersionsAndDeletesPolicy(Duration retentionDuration) {
    +
    +        if (retentionDuration.isNegative() ||
    +            retentionDuration.isZero() ||
    +            retentionDuration.getNano() != 0 ||
    +            retentionDuration.getSeconds() % 60 != 0) {
    +            throw new IllegalArgumentException(
    +                "Argument retentionDuration must be a duration of a positive number of minutes."
    +            );
    +        }
    +
    +        ChangeFeedPolicy policy = new ChangeFeedPolicy();
    +        policy.setRetentionDurationForAllVersionsAndDeletesPolicyInMinutes((int)retentionDuration.toMinutes());
    +        return policy;
    +    }
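    // Editor's illustrative sketch (not part of this patch) of the validation above:
    // only positive, whole-minute retention durations are accepted.
    //
    //   ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(10)); // ok
    //   ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofHours(1));    // ok - whole minutes
    //   ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofSeconds(90)); // throws IllegalArgumentException
    //   ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ZERO);          // throws IllegalArgumentException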
    +
    +    /**
    +     * Creates a default ChangeFeedPolicy without retention duration specified. With the default/LatestVersion
          * change feed it will not be possible to process intermediary changes or deletes.
          * <p/>
          * This is the default policy being used when not specifying any ChangeFeedPolicy for the Container.
          * <p/>
          *
     -     * @return ChangeFeedPolicy for default/incremental change feed without full fidelity.
     +     * @return ChangeFeedPolicy for default/LatestVersion change feed without AllVersionsAndDeletes.
     +     * @deprecated use {@link ChangeFeedPolicy#createLatestVersionPolicy()} instead.
           */
          @Beta(value = Beta.SinceVersion.V4_12_0,
              warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
     +    @Deprecated //since = "V4_37_0", forRemoval = true
          public static ChangeFeedPolicy createIncrementalPolicy() {
      
              ChangeFeedPolicy policy = new ChangeFeedPolicy();
     -        policy.setFullFidelityRetentionDurationInMinutes(null);
     +        policy.setRetentionDurationForAllVersionsAndDeletesPolicyInMinutes(null);
     +        return policy;
     +    }
     +
     +    /**
     +     * Creates a default ChangeFeedPolicy without retention duration specified. With the default/LatestVersion
     +     * change feed it will not be possible to process intermediary changes or deletes.
     +     * <p/>
     +     * This is the default policy being used when not specifying any ChangeFeedPolicy for the Container.
     +     * <p/>
     +     *
     +     * @return ChangeFeedPolicy for default/LatestVersion change feed without AllVersionsAndDeletes.
     +     */
     +    @Beta(value = Beta.SinceVersion.V4_37_0,
     +        warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
     +    public static ChangeFeedPolicy createLatestVersionPolicy() {
     +
     +        ChangeFeedPolicy policy = new ChangeFeedPolicy();
     +        policy.setRetentionDurationForAllVersionsAndDeletesPolicyInMinutes(null);
              return policy;
          }
      
     @@ -131,27 +177,43 @@ public static ChangeFeedPolicy createIncrementalPolicy() {
      
          /**
           * Gets the retention duration in which it will be possible to
     -     * process change feed events with full fidelity mode (meaning intermediary changes and deletes
     -     * will be exposed in change feed).
     -     * By default full fidelity change feed is not enabled - so the retention duration would be Duration.ZERO.
     +     * process change feed events with AllVersionsAndDeletes mode
     +     * (meaning intermediary changes and deletes will be exposed in change feed).
     +     * By default AllVersionsAndDeletes change feed is not enabled - so the retention duration would be Duration.ZERO.
           *
     -     * @return full fidelity retention duration.
     +     * @return AllVersionsAndDeletes retention duration.
     +     * @deprecated use {@link ChangeFeedPolicy#getRetentionDurationForAllVersionsAndDeletesPolicy()} instead
           */
          @Beta(value = Beta.SinceVersion.V4_12_0,
              warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
     +    @Deprecated //since = "V4_37_0", forRemoval = true
          public Duration getFullFidelityRetentionDuration() {
     -        return Duration.ofMinutes(this.getFullFidelityRetentionDurationInMinutes());
     +        return this.getRetentionDurationForAllVersionsAndDeletesPolicy();
     +    }
     +
     +    /**
     +     * Gets the retention duration in which it will be possible to
     +     * process change feed events with AllVersionsAndDeletes mode
     +     * (meaning intermediary changes and deletes will be exposed in change feed).
     +     * By default AllVersionsAndDeletes change feed is not enabled - so the retention duration would be Duration.ZERO.
     +     *
     +     * @return AllVersionsAndDeletes retention duration.
     +     */
     +    @Beta(value = Beta.SinceVersion.V4_37_0,
     +        warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING)
     +    public Duration getRetentionDurationForAllVersionsAndDeletesPolicy() {
     +        return Duration.ofMinutes(this.getRetentionDurationForAllVersionsAndDeletesPolicyInMinutes());
          }
      
          /**
     -     * Gets the retention duration in minutes in which it will be possible to
     -     * process change feed events with full fidelity mode (meaning intermediary changes and deletes
     -     * will be exposed in change feed).
     -     * By default full fidelity change feed is not enabled - so the retention duration would be 0.
     +     * Gets the retention duration in minutes in which it will be possible to
     +     * process change feed events with AllVersionsAndDeletes mode
     +     * (meaning intermediary changes and deletes will be exposed in change feed).
     +     * By default AllVersionsAndDeletes change feed is not enabled - so the retention duration would be 0.
           *
     -     * @return full fidelity retention duration in minutes.
     +     * @return AllVersionsAndDeletes retention duration in minutes.
           */
     -    int getFullFidelityRetentionDurationInMinutes() {
     +    int getRetentionDurationForAllVersionsAndDeletesPolicyInMinutes() {
              Integer intValue = this.jsonSerializable.getInt(Constants.Properties.LOG_RETENTION_DURATION);
      
     @@ -164,15 +226,15 @@ int getFullFidelityRetentionDurationInMinutes() {
      
          /**
           * Sets the retention duration in minutes in which it will be possible to
     -     * process change feed events with full fidelity mode (meaning intermediary changes
     -     * and deletes will be exposed in change feed).
+ * process change feed events with AllVersionsAndDeletes mode + * (meaning intermediary changes and deletes will be exposed in change feed). * If the value of the {@param retentionDurationInMinutes} argument is null, 0 or negative - * no full fidelity change feed is available for the container and change feed events can only - * be processed with the default mode "Incremental". + * no AllVersionsAndDeletes change feed is available for the container and change feed events can only + * be processed with the default mode LatestVersion. * - * @param retentionDurationInMinutes - Full fidelity retention duration in minutes. + * @param retentionDurationInMinutes - AllVersionsAndDeletes retention duration in minutes. */ - ChangeFeedPolicy setFullFidelityRetentionDurationInMinutes(Integer retentionDurationInMinutes) { + ChangeFeedPolicy setRetentionDurationForAllVersionsAndDeletesPolicyInMinutes(Integer retentionDurationInMinutes) { if (retentionDurationInMinutes == null || retentionDurationInMinutes <= 0) { this.jsonSerializable.set( Constants.Properties.LOG_RETENTION_DURATION, diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedProcessorItem.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ChangeFeedProcessorItem.java similarity index 65% rename from sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedProcessorItem.java rename to sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ChangeFeedProcessorItem.java index fe825963fec40..36e451ade36fe 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/common/ChangeFeedProcessorItem.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ChangeFeedProcessorItem.java @@ -1,8 +1,9 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -package com.azure.cosmos.implementation.changefeed.common; +package com.azure.cosmos.models; import com.azure.cosmos.implementation.Utils; +import com.azure.cosmos.util.Beta; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.core.JsonProcessingException; @@ -14,6 +15,7 @@ * * Caller is recommended to type cast {@link JsonNode} to cosmos item structure. */ +@Beta(value = Beta.SinceVersion.V4_37_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public final class ChangeFeedProcessorItem { @JsonProperty("current") @JsonInclude(JsonInclude.Include.NON_NULL) @@ -30,6 +32,7 @@ public final class ChangeFeedProcessorItem { * * @return change feed current item. */ + @Beta(value = Beta.SinceVersion.V4_37_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public JsonNode getCurrent() { return current; } @@ -41,6 +44,7 @@ public JsonNode getCurrent() { * * @return change feed previous item. */ + @Beta(value = Beta.SinceVersion.V4_37_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public JsonNode getPrevious() { return previous; } @@ -50,10 +54,24 @@ public JsonNode getPrevious() { * * @return change feed metadata. */ + @Beta(value = Beta.SinceVersion.V4_37_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) public ChangeFeedMetaData getChangeFeedMetaData() { return changeFeedMetaData; } + /** + * Helper API to convert this changeFeedProcessorItem instance to raw JsonNode format. + * + * @return jsonNode format of this changeFeedProcessorItem instance. 
+ * + * @throws IllegalArgumentException If conversion fails due to incompatible type; + * if so, root cause will contain underlying checked exception data binding functionality threw + */ + @Beta(value = Beta.SinceVersion.V4_37_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) + public JsonNode toJsonNode() { + return Utils.getSimpleObjectMapper().convertValue(this, JsonNode.class); + } + @Override public String toString() { try { diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/CosmosChangeFeedRequestOptions.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/CosmosChangeFeedRequestOptions.java index 67a761329e1c3..1d8b87924ca6a 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/CosmosChangeFeedRequestOptions.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/CosmosChangeFeedRequestOptions.java @@ -344,30 +344,32 @@ CosmosChangeFeedRequestOptions withCosmosPagedFluxOptions( /** * Changes the change feed mode so that the change feed will contain events for creations, - * deletes as well as all intermediary snapshots for updates. Enabling full fidelity change feed - * mode requires configuring a retention duration in the change feed policy of the + * deletes as well as all intermediary snapshots for updates. Enabling AllVersionsAndDeletes + * change feed mode requires configuring a retention duration in the change feed policy of the * container. {@link ChangeFeedPolicy} *

          * Intermediary snapshots of changes as well as deleted documents would be
     -     * available for processing for 8 minutes before they vanish.
     -     * When enabling full fidelity mode you will only be able to process change feed events
     +     * available for processing for the retention window before they vanish.
     +     * When enabling AllVersionsAndDeletes mode you will only be able to process change feed events
          * within the retention window configured in the change feed policy of the container.
          * If you attempt to process a change feed after more than the retention window
          * an error (Status Code 400) will be returned because the events for intermediary
          * updates and deletes have vanished.
     -     * It would still be possible to process changes using Incremental mode even when
     -     * configuring a full fidelity change feed policy with retention window on the container
     -     * and when using Incremental mode it doesn't matter whether your are out of the retention
     +     * It would still be possible to process changes using LatestVersion mode even when
     +     * configuring an AllVersionsAndDeletes change feed policy with a retention window on the container
     +     * and when using LatestVersion mode it doesn't matter whether you are out of the retention
          * window or not - but no events for deletes or intermediary updates would be included.
          * When events are not getting processed within the retention window it is also possible
     -     * to continue processing future events in full fidelity mode by querying the change feed
     +     * to continue processing future events in AllVersionsAndDeletes mode by querying the change feed
          * with a new CosmosChangeFeedRequestOptions instance.
          * <p/>
    * - * @return a {@link CosmosChangeFeedRequestOptions} instance with full fidelity mode enabled + * @return a {@link CosmosChangeFeedRequestOptions} instance with AllVersionsAndDeletes mode enabled + * @deprecated use {@link CosmosChangeFeedRequestOptions#allVersionsAndDeletes()} instead. */ @Beta(value = Beta.SinceVersion.V4_12_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) + @Deprecated //since = "V4_37_0", forRemoval = true public CosmosChangeFeedRequestOptions fullFidelity() { if (!this.startFromInternal.supportsFullFidelityRetention()) { @@ -382,6 +384,46 @@ public CosmosChangeFeedRequestOptions fullFidelity() { return this; } + /** + * Changes the change feed mode so that the change feed will contain events for creations, + * deletes as well as all intermediary snapshots for updates. Enabling AllVersionsAndDeletes + * change feed mode requires configuring a retention duration in the change feed policy of the + * container. {@link ChangeFeedPolicy} + *

     +     * Intermediary snapshots of changes as well as deleted documents would be
     +     * available for processing for the retention window before they vanish.
     +     * When enabling AllVersionsAndDeletes mode you will only be able to process change feed events
     +     * within the retention window configured in the change feed policy of the container.
     +     * If you attempt to process a change feed after more than the retention window
     +     * an error (Status Code 400) will be returned because the events for intermediary
     +     * updates and deletes have vanished.
     +     * It would still be possible to process changes using LatestVersion mode even when
     +     * configuring an AllVersionsAndDeletes change feed policy with a retention window on the container
     +     * and when using LatestVersion mode it doesn't matter whether you are out of the retention
     +     * window or not - but no events for deletes or intermediary updates would be included.
     +     * When events are not getting processed within the retention window it is also possible
     +     * to continue processing future events in AllVersionsAndDeletes mode by querying the change feed
     +     * with a new CosmosChangeFeedRequestOptions instance.
     +     * <p/>
    + * + * @return a {@link CosmosChangeFeedRequestOptions} instance with AllVersionsAndDeletes mode enabled + */ + @Beta(value = Beta.SinceVersion.V4_37_0, warningText = + Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) + public CosmosChangeFeedRequestOptions allVersionsAndDeletes() { + + if (!this.startFromInternal.supportsFullFidelityRetention()) { + throw new IllegalStateException( + "All Versions and Deletes mode is not supported for the chosen change feed start from " + + "option. Use CosmosChangeFeedRequestOptions.createForProcessingFromNow or " + + "CosmosChangeFeedRequestOptions.createFromContinuation instead." + ); + } + + this.mode = ChangeFeedMode.FULL_FIDELITY; + return this; + } + /** * Get the throughput control group name. * diff --git a/sdk/cosmos/azure-cosmos/src/samples/java/com/azure/cosmos/ChangeFeedProcessorAllVersionsAndDeletesModeCodeSnippet.java b/sdk/cosmos/azure-cosmos/src/samples/java/com/azure/cosmos/ChangeFeedProcessorAllVersionsAndDeletesModeCodeSnippet.java new file mode 100644 index 0000000000000..8dc2320c2e2c9 --- /dev/null +++ b/sdk/cosmos/azure-cosmos/src/samples/java/com/azure/cosmos/ChangeFeedProcessorAllVersionsAndDeletesModeCodeSnippet.java @@ -0,0 +1,63 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.cosmos; + +import com.azure.cosmos.implementation.TestConfigurations; +import com.azure.cosmos.models.ChangeFeedProcessorItem; + +/** + * Code snippets for AllVersionsAndDeletesChangeFeedProcessor + */ +public class ChangeFeedProcessorAllVersionsAndDeletesModeCodeSnippet { + + public void changeFeedProcessorBuilderCodeSnippet() { + String hostName = "test-host-name"; + CosmosAsyncClient cosmosAsyncClient = new CosmosClientBuilder() + .endpoint(TestConfigurations.HOST) + .key(TestConfigurations.MASTER_KEY) + .contentResponseOnWriteEnabled(true) + .consistencyLevel(ConsistencyLevel.SESSION) + .buildAsyncClient(); + CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient.getDatabase("testDb"); + CosmosAsyncContainer feedContainer = cosmosAsyncDatabase.getContainer("feedContainer"); + CosmosAsyncContainer leaseContainer = cosmosAsyncDatabase.getContainer("leaseContainer"); + // BEGIN: com.azure.cosmos.allVersionsAndDeletesChangeFeedProcessor.builder + ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder() + .hostName(hostName) + .feedContainer(feedContainer) + .leaseContainer(leaseContainer) + .handleAllVersionsAndDeletesChanges(docs -> { + for (ChangeFeedProcessorItem item : docs) { + // Implementation for handling and processing of each ChangeFeedProcessorItem item goes here + } + }) + .buildChangeFeedProcessor(); + // END: com.azure.cosmos.allVersionsAndDeletesChangeFeedProcessor.builder + } + + public void handleChangesCodeSnippet() { + String hostName = "test-host-name"; + CosmosAsyncClient cosmosAsyncClient = new CosmosClientBuilder() + .endpoint(TestConfigurations.HOST) + .key(TestConfigurations.MASTER_KEY) + .contentResponseOnWriteEnabled(true) + .consistencyLevel(ConsistencyLevel.SESSION) + .buildAsyncClient(); + CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient.getDatabase("testDb"); + CosmosAsyncContainer feedContainer = cosmosAsyncDatabase.getContainer("feedContainer"); + CosmosAsyncContainer leaseContainer = cosmosAsyncDatabase.getContainer("leaseContainer"); + ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder() + .hostName(hostName) + .feedContainer(feedContainer) + .leaseContainer(leaseContainer) + // BEGIN: 
com.azure.cosmos.allVersionsAndDeletesChangeFeedProcessor.handleChanges + .handleAllVersionsAndDeletesChanges(docs -> { + for (ChangeFeedProcessorItem item : docs) { + // Implementation for handling and processing of each ChangeFeedProcessorItem item goes here + } + }) + // END: com.azure.cosmos.allVersionsAndDeletesChangeFeedProcessor.handleChanges + .buildChangeFeedProcessor(); + } +} + diff --git a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/CosmosContainerChangeFeedTest.java b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/CosmosContainerChangeFeedTest.java index b72438afa1187..7f695970f91a4 100644 --- a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/CosmosContainerChangeFeedTest.java +++ b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/CosmosContainerChangeFeedTest.java @@ -112,7 +112,7 @@ public void before_CosmosContainerTest() { @Test(groups = { "emulator" }, timeOut = TIMEOUT * 5) public void asyncChangeFeed_fromBeginning_incremental_forFullRange() throws Exception { this.createContainer( - (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createIncrementalPolicy()) + (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createLatestVersionPolicy()) ); insertDocuments(200, 7); updateDocuments(3, 5); @@ -150,7 +150,7 @@ public void asyncChangeFeed_fromBeginning_incremental_forFullRange() throws Exce @Test(groups = { "emulator" }, timeOut = TIMEOUT ) public void asyncChangeFeed_fromBeginning_incremental_forFullRange_withSmallPageSize() throws Exception { this.createContainer( - (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createIncrementalPolicy()) + (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createLatestVersionPolicy()) ); insertDocuments(200, 7); updateDocuments(3, 5); @@ -196,7 +196,7 @@ public void asyncChangeFeed_fromBeginning_incremental_forFullRange_withSmallPage @Test(groups = { "emulator" }, timeOut = TIMEOUT, retryAnalyzer = RetryAnalyzer.class) public void asyncChangeFeed_fromBeginning_incremental_forLogicalPartition() throws Exception { this.createContainer( - (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createIncrementalPolicy()) + (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createLatestVersionPolicy()) ); insertDocuments(20, 7); updateDocuments(3, 5); @@ -249,7 +249,7 @@ public void asyncChangeFeed_fromBeginning_incremental_forLogicalPartition() thro @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void asyncChangeFeed_fromBeginning_incremental_forEPK() throws Exception { this.createContainer( - (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createIncrementalPolicy()) + (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createLatestVersionPolicy()) ); insertDocuments(20, 7); updateDocuments(3, 5); @@ -320,7 +320,7 @@ public void asyncChangeFeed_fromBeginning_incremental_forEPK() throws Exception @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void asyncChangeFeed_fromBeginning_incremental_forFeedRange() throws Exception { this.createContainer( - (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createIncrementalPolicy()) + (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createLatestVersionPolicy()) ); insertDocuments(200, 7); updateDocuments(3, 5); @@ -371,14 +371,14 @@ public void asyncChangeFeed_fromBeginning_fullFidelity_forFullRange() throws Exc IllegalStateException.class, () -> CosmosChangeFeedRequestOptions .createForProcessingFromBeginning(FeedRange.forFullRange()) - .fullFidelity()); + .allVersionsAndDeletes()); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void 
asyncChangeFeed_fromNow_incremental_forFullRange() throws Exception { this.createContainer( - (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createIncrementalPolicy()) + (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createLatestVersionPolicy()) ); insertDocuments(20, 7); updateDocuments(3, 5); @@ -418,7 +418,7 @@ public void asyncChangeFeed_fromNow_incremental_forFullRange() throws Exception @Test(groups = { "emulator" }, timeOut = TIMEOUT, enabled = false) public void asyncChangeFeed_fromNow_fullFidelity_forFullRange() throws Exception { this.createContainer( - (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(10))) + (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(10))) ); insertDocuments(8, 15); updateDocuments(3, 5); @@ -480,7 +480,7 @@ public void asyncChangeFeed_fromNow_fullFidelity_forFullRange() throws Exception @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void asyncChangeFeed_fromPointInTime_incremental_forFullRange() throws Exception { this.createContainer( - (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createIncrementalPolicy()) + (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createLatestVersionPolicy()) ); insertDocuments(20, 7); updateDocuments(3, 5); @@ -525,13 +525,13 @@ public void asyncChangeFeed_fromPointInTime_fullFidelity_forFullRange() throws E .createForProcessingFromPointInTime( Instant.now().minus(10, ChronoUnit.SECONDS), FeedRange.forFullRange()) - .fullFidelity()); + .allVersionsAndDeletes()); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void syncChangeFeed_fromBeginning_incremental_forFullRange() throws Exception { this.createContainer( - (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createIncrementalPolicy()) + (cp) -> cp.setChangeFeedPolicy(ChangeFeedPolicy.createLatestVersionPolicy()) ); insertDocuments(200, 7); updateDocuments(3, 5); diff --git a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/CosmosContainerTest.java b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/CosmosContainerTest.java index 00e5b8856a22d..36ce99c42b75e 100644 --- a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/CosmosContainerTest.java +++ b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/CosmosContainerTest.java @@ -42,14 +42,12 @@ import org.testng.annotations.Test; import reactor.core.publisher.Mono; -import java.io.ByteArrayOutputStream; import java.nio.charset.StandardCharsets; import java.time.Duration; import java.util.ArrayList; import java.util.Base64; import java.util.List; import java.util.UUID; -import java.util.ArrayList; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; @@ -279,7 +277,7 @@ public void createContainer_withFullFidelityChangeFeedPolicy() throws Exception String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); containerProperties.setChangeFeedPolicy( - ChangeFeedPolicy.createFullFidelityPolicy( + ChangeFeedPolicy.createAllVersionsAndDeletesPolicy( Duration.ofMinutes(8))); int throughput = 1000; @@ -289,7 +287,7 @@ public void createContainer_withFullFidelityChangeFeedPolicy() throws Exception validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); - 
assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) + assertThat(containerResponse.getProperties().getChangeFeedPolicy().getRetentionDurationForAllVersionsAndDeletesPolicy()) .isEqualTo(Duration.ofMinutes(8)); } @@ -297,7 +295,7 @@ public void createContainer_withFullFidelityChangeFeedPolicy() throws Exception public void createContainer_withIncrementalChangeFeedPolicy() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); - containerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createIncrementalPolicy()); + containerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createLatestVersionPolicy()); int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, @@ -306,7 +304,7 @@ public void createContainer_withIncrementalChangeFeedPolicy() throws Exception { validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); - assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) + assertThat(containerResponse.getProperties().getChangeFeedPolicy().getRetentionDurationForAllVersionsAndDeletesPolicy()) .isEqualTo(Duration.ZERO); } @@ -322,7 +320,7 @@ public void createContainer_withDefaultChangeFeedPolicy() throws Exception { validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); - assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) + assertThat(containerResponse.getProperties().getChangeFeedPolicy().getRetentionDurationForAllVersionsAndDeletesPolicy()) .isEqualTo(Duration.ZERO); } @@ -708,7 +706,7 @@ public void enableFullFidelityChangeFeedForExistingContainer() throws Exception this.createdContainer = createdDatabase.getContainer(collectionName); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); - assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) + assertThat(containerResponse.getProperties().getChangeFeedPolicy().getRetentionDurationForAllVersionsAndDeletesPolicy()) .isEqualTo(Duration.ZERO); CosmosContainerResponse replaceResponse = @@ -716,10 +714,10 @@ public void enableFullFidelityChangeFeedForExistingContainer() throws Exception .replace(containerResponse .getProperties() .setChangeFeedPolicy( - ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(4)))); + ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(4)))); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); - assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) + assertThat(containerResponse.getProperties().getChangeFeedPolicy().getRetentionDurationForAllVersionsAndDeletesPolicy()) .isEqualTo(Duration.ofMinutes(4)); } @@ -727,7 +725,7 @@ public void enableFullFidelityChangeFeedForExistingContainer() throws Exception public void changeFullFidelityChangeFeedRetentionDurationForExistingContainer() throws Exception { String 
collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); - containerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(3))); + containerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(3))); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); @@ -735,7 +733,7 @@ public void changeFullFidelityChangeFeedRetentionDurationForExistingContainer() validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); - assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) + assertThat(containerResponse.getProperties().getChangeFeedPolicy().getRetentionDurationForAllVersionsAndDeletesPolicy()) .isEqualTo(Duration.ofMinutes(3)); CosmosContainerResponse replaceResponse = @@ -743,10 +741,10 @@ public void changeFullFidelityChangeFeedRetentionDurationForExistingContainer() .replace(containerResponse .getProperties() .setChangeFeedPolicy( - ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(6)))); + ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(6)))); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); - assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) + assertThat(containerResponse.getProperties().getChangeFeedPolicy().getRetentionDurationForAllVersionsAndDeletesPolicy()) .isEqualTo(Duration.ofMinutes(6)); } diff --git a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/implementation/ChangeFeedStateTest.java b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/implementation/ChangeFeedStateTest.java index 2a30e56fb8308..fa7571a4e664f 100644 --- a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/implementation/ChangeFeedStateTest.java +++ b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/implementation/ChangeFeedStateTest.java @@ -22,7 +22,7 @@ public class ChangeFeedStateTest { @Test(groups = "unit") - public void changeFeedState_startFromNow_PKRangeId_toJsonFromJson() { + public void changeFeedState_incrementalMode_startFromNow_PKRangeId_toJsonFromJson() { String containerRid = "/cols/" + UUID.randomUUID().toString(); String pkRangeId = UUID.randomUUID().toString(); FeedRangePartitionKeyRangeImpl feedRange = new FeedRangePartitionKeyRangeImpl(pkRangeId); @@ -104,6 +104,89 @@ public void changeFeedState_startFromNow_PKRangeId_toJsonFromJson() { assertThat(representationAfterDeserialization).isEqualTo(base64EncodedJsonRepresentation); } + @Test(groups = "unit") + public void changeFeedState_fullFidelityMode_startFromNow_PKRangeId_toJsonFromJson() { + String containerRid = "/cols/" + UUID.randomUUID().toString(); + String pkRangeId = UUID.randomUUID().toString(); + FeedRangePartitionKeyRangeImpl feedRange = new FeedRangePartitionKeyRangeImpl(pkRangeId); + ChangeFeedStartFromInternal startFromSettings = ChangeFeedStartFromInternal.createFromNow(); + ChangeFeedState stateWithoutContinuation = new ChangeFeedStateV1( + containerRid, + feedRange, + ChangeFeedMode.FULL_FIDELITY, + startFromSettings, + null); + + String 
base64EncodedJsonRepresentation = stateWithoutContinuation.toString(); + String jsonRepresentation = new String( + Base64.getUrlDecoder().decode(base64EncodedJsonRepresentation), + StandardCharsets.UTF_8); + assertThat(jsonRepresentation) + .isEqualTo( + String.format( + "{\"V\":1," + + "\"Rid\":\"%s\"," + + "\"Mode\":\"FULL_FIDELITY\"," + + "\"StartFrom\":{\"Type\":\"NOW\"}," + + "\"PKRangeId\":\"%s\"}", + containerRid, + pkRangeId)); + + assertThat(ChangeFeedState.fromString(base64EncodedJsonRepresentation)) + .isNotNull() + .isInstanceOf(ChangeFeedStateV1.class); + + ChangeFeedStateV1 stateWithoutContinuationDeserialized = + (ChangeFeedStateV1)ChangeFeedState.fromString(base64EncodedJsonRepresentation); + + String representationAfterDeserialization = stateWithoutContinuationDeserialized.toString(); + assertThat(representationAfterDeserialization).isEqualTo(base64EncodedJsonRepresentation); + + String continuationDummy = UUID.randomUUID().toString(); + String continuationJson = String.format( + "{\"V\":1," + + "\"Rid\":\"%s\"," + + "\"Continuation\":[" + + "{\"token\":\"%s\",\"range\":{\"min\":\"AA\",\"max\":\"BB\"}}," + + "{\"token\":\"%s\",\"range\":{\"min\":\"CC\",\"max\":\"DD\"}}" + + "]," + + "\"PKRangeId\":\"%s\"}", + containerRid, + continuationDummy, + continuationDummy, + pkRangeId); + + FeedRangeContinuation continuation = FeedRangeContinuation.convert(continuationJson); + + ChangeFeedState stateWithContinuation = + stateWithoutContinuation.setContinuation(continuation); + base64EncodedJsonRepresentation = stateWithContinuation.toString(); + jsonRepresentation = new String( + Base64.getUrlDecoder().decode(base64EncodedJsonRepresentation), + StandardCharsets.UTF_8); + + assertThat(jsonRepresentation) + .isEqualTo( + String.format( + "{\"V\":1," + + "\"Rid\":\"%s\"," + + "\"Mode\":\"FULL_FIDELITY\"," + + "\"StartFrom\":{\"Type\":\"NOW\"}," + + "\"Continuation\":%s}", + containerRid, + continuationJson)); + + assertThat(ChangeFeedState.fromString(base64EncodedJsonRepresentation)) + .isNotNull() + .isInstanceOf(ChangeFeedStateV1.class); + + ChangeFeedStateV1 stateWithContinuationDeserialized = + (ChangeFeedStateV1)ChangeFeedState.fromString(base64EncodedJsonRepresentation); + + representationAfterDeserialization = stateWithContinuationDeserialized.toString(); + assertThat(representationAfterDeserialization).isEqualTo(base64EncodedJsonRepresentation); + } + private ChangeFeedState createStateWithContinuation(String continuationAAToCC, String continuationCCToEE) { String containerRid = "/cols/" + UUID.randomUUID().toString(); diff --git a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/implementation/throughputControl/ThroughputControlTests.java b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/implementation/throughputControl/ThroughputControlTests.java index 5c8c4ef932813..890e6140ce6b8 100644 --- a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/implementation/throughputControl/ThroughputControlTests.java +++ b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/implementation/throughputControl/ThroughputControlTests.java @@ -116,7 +116,7 @@ public void throughputGlobalControl(OperationType operationType) { CosmosAsyncContainer controlContainer = database.getContainer(controlContainerId); database .createContainerIfNotExists( - controlContainer.getId(), "/groupId", ThroughputProperties.createManualThroughput(100000)) + controlContainer.getId(), "/groupId", ThroughputProperties.createManualThroughput(10100)) .block(); try { @@ -161,7 +161,7 @@ public void 
throughputGlobalControlCanUpdateConfig(OperationType operationType) CosmosAsyncContainer controlContainer = database.getContainer(controlContainerId); database .createContainerIfNotExists( - controlContainer.getId(), "/groupId", ThroughputProperties.createManualThroughput(100000)) + controlContainer.getId(), "/groupId", ThroughputProperties.createManualThroughput(10100)) .block(); try { @@ -377,7 +377,7 @@ public void throughputGlobalControlMultipleClients() throws InterruptedException CosmosAsyncContainer controlContainer = database.getContainer(controlContainerId); database .createContainerIfNotExists( - controlContainer.getId(), "/groupId", ThroughputProperties.createManualThroughput(100000)) + controlContainer.getId(), "/groupId", ThroughputProperties.createManualThroughput(10100)) .block(); try { @@ -435,7 +435,7 @@ public void enableSameGroupMultipleTimes() { CosmosAsyncContainer controlContainer = database.getContainer(controlContainerId); database .createContainerIfNotExists( - controlContainer.getId(), "/groupId", ThroughputProperties.createManualThroughput(100000)) + controlContainer.getId(), "/groupId", ThroughputProperties.createManualThroughput(10100)) .block(); try { diff --git a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/ChangeFeedFullFidelityTest.java b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/ChangeFeedFullFidelityTest.java deleted file mode 100644 index a266c19d5a279..0000000000000 --- a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/ChangeFeedFullFidelityTest.java +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -package com.azure.cosmos.rx; - -import com.azure.cosmos.CosmosAsyncClient; -import com.azure.cosmos.CosmosAsyncContainer; -import com.azure.cosmos.CosmosAsyncDatabase; -import com.azure.cosmos.CosmosClientBuilder; -import com.azure.cosmos.implementation.TestConfigurations; -import com.azure.cosmos.models.CosmosChangeFeedRequestOptions; -import com.azure.cosmos.models.FeedRange; -import com.azure.cosmos.models.FeedResponse; -import com.azure.cosmos.util.CosmosPagedFlux; -import com.fasterxml.jackson.databind.DeserializationFeature; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -public class ChangeFeedFullFidelityTest { - private static final String databaseId = "SampleDatabase"; - private static final String containerId = "GreenTaxiRecords"; - private static final Logger logger = LoggerFactory.getLogger(ChangeFeedFullFidelityTest.class); - private static final ObjectMapper objectMapper = new ObjectMapper(); - private static CosmosAsyncDatabase database; - private static CosmosAsyncContainer container; - private static Map> changeFeedMap = new ConcurrentHashMap<>(); - - - public static void main(String[] args) { - setup(); - runChangeFeedFullFidelityFromNow(); - } - - private static void setup() { - logger.info("Setting up"); - - objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); - - CosmosAsyncClient cosmosAsyncClient = new CosmosClientBuilder() - .endpoint(TestConfigurations.HOST) - 
.key(TestConfigurations.MASTER_KEY) - .contentResponseOnWriteEnabled(true) - .buildAsyncClient(); - - database = cosmosAsyncClient.getDatabase(databaseId); - container = database.getContainer(containerId); - } - - private static void runChangeFeedFullFidelityFromNow() { - - ScheduledExecutorService executorService = new ScheduledThreadPoolExecutor(1); - executorService.scheduleAtFixedRate(ChangeFeedFullFidelityTest::checkChangeFeedMapDetails, - 30, 30, TimeUnit.SECONDS); - - CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions - .createForProcessingFromNow(FeedRange.forFullRange()); - options.fullFidelity(); - - String continuationToken = ""; - - CosmosPagedFlux cosmosPagedFlux = container.queryChangeFeed(options, JsonNode.class); - Iterator> iterator = cosmosPagedFlux.byPage().toIterable().iterator(); - - do { - while (iterator.hasNext()) { - FeedResponse next = iterator.next(); - List jsonNodes = next.getResults(); - for (JsonNode item : jsonNodes) { - try { - String operationType = item.get("metadata").get("operationType").asText(); - if (!changeFeedMap.containsKey(operationType)) { - changeFeedMap.put(operationType, new ArrayList<>()); - } - changeFeedMap.get(operationType).add(item); - } - catch (Exception e) { - if (item == null) { - logger.error("Received null item ", e); - } else { - logger.error("Error occurred for item : {}", item.toPrettyString(), e); - } - } - } - continuationToken = next.getContinuationToken(); - } - options = CosmosChangeFeedRequestOptions.createForProcessingFromContinuation(continuationToken); - cosmosPagedFlux = container.queryChangeFeed(options, JsonNode.class); - iterator = cosmosPagedFlux.byPage().toIterable().iterator(); - } while (continuationToken != null); - } - - private static void checkChangeFeedMapDetails() { - logger.info("Change feed map details are"); - changeFeedMap.forEach((key, value) -> { - logger.info("Operation type : {}, number of changes : {}", key, value.size()); - }); - } -} diff --git a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/ChangeFeedProcessorTest.java b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/ChangeFeedProcessorTest.java index 56c18d320c809..a56889a1797f7 100644 --- a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/ChangeFeedProcessorTest.java +++ b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/ChangeFeedProcessorTest.java @@ -4,12 +4,15 @@ import com.azure.cosmos.ChangeFeedProcessor; import com.azure.cosmos.ChangeFeedProcessorBuilder; -import com.azure.cosmos.implementation.AsyncDocumentClient; -import com.azure.cosmos.models.ChangeFeedProcessorOptions; import com.azure.cosmos.CosmosAsyncClient; import com.azure.cosmos.CosmosAsyncContainer; import com.azure.cosmos.CosmosAsyncDatabase; import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.implementation.AsyncDocumentClient; +import com.azure.cosmos.implementation.InternalObjectNode; +import com.azure.cosmos.implementation.Utils; +import com.azure.cosmos.implementation.changefeed.incremental.ServiceItemLease; +import com.azure.cosmos.models.ChangeFeedProcessorOptions; import com.azure.cosmos.models.ChangeFeedProcessorState; import com.azure.cosmos.models.CosmosContainerProperties; import com.azure.cosmos.models.CosmosContainerRequestOptions; @@ -19,9 +22,6 @@ import com.azure.cosmos.models.PartitionKey; import com.azure.cosmos.models.SqlParameter; import com.azure.cosmos.models.SqlQuerySpec; -import com.azure.cosmos.implementation.InternalObjectNode; -import 
com.azure.cosmos.implementation.Utils; -import com.azure.cosmos.implementation.changefeed.incremental.ServiceItemLease; import com.azure.cosmos.models.ThroughputProperties; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; @@ -67,9 +67,8 @@ public class ChangeFeedProcessorTest extends TestSuiteBase { private final String hostName = RandomStringUtils.randomAlphabetic(6); private final int FEED_COUNT = 10; private final int CHANGE_FEED_PROCESSOR_TIMEOUT = 5000; - private final int FEED_COLLECTION_THROUGHPUT_MAX = 20000; - private final int FEED_COLLECTION_THROUGHPUT = 10100; - private final int FEED_COLLECTION_THROUGHPUT_FOR_SPLIT = 400; + private final int FEED_COLLECTION_THROUGHPUT = 400; + private final int FEED_COLLECTION_THROUGHPUT_FOR_SPLIT = 10100; private final int LEASE_COLLECTION_THROUGHPUT = 400; private CosmosAsyncClient client; @@ -751,7 +750,7 @@ public void ownerNullAcquiring() throws InterruptedException { @Test(groups = { "simple" }, timeOut = 160 * CHANGE_FEED_PROCESSOR_TIMEOUT) public void readFeedDocumentsAfterSplit() throws InterruptedException { - CosmosAsyncContainer createdFeedCollectionForSplit = createFeedCollection(FEED_COLLECTION_THROUGHPUT_FOR_SPLIT); + CosmosAsyncContainer createdFeedCollectionForSplit = createFeedCollection(FEED_COLLECTION_THROUGHPUT); CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(2 * LEASE_COLLECTION_THROUGHPUT); CosmosAsyncContainer createdLeaseMonitorCollection = createLeaseMonitorCollection(LEASE_COLLECTION_THROUGHPUT); @@ -818,7 +817,7 @@ public void readFeedDocumentsAfterSplit() throws InterruptedException { .readThroughput().subscribeOn(Schedulers.boundedElastic()) .flatMap(currentThroughput -> createdFeedCollectionForSplit - .replaceThroughput(ThroughputProperties.createManualThroughput(FEED_COLLECTION_THROUGHPUT)) + .replaceThroughput(ThroughputProperties.createManualThroughput(FEED_COLLECTION_THROUGHPUT_FOR_SPLIT)) .subscribeOn(Schedulers.boundedElastic()) ) .then() @@ -936,7 +935,7 @@ public void readFeedDocumentsAfterSplit() throws InterruptedException { @Test(groups = { "emulator" }, timeOut = 20 * TIMEOUT) public void inactiveOwnersRecovery() throws InterruptedException { - CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT_MAX); + CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT); CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT); try { diff --git a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/ChangeFeedTest.java b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/ChangeFeedTest.java index 63745297154b4..45527e67e75e7 100644 --- a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/ChangeFeedTest.java +++ b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/ChangeFeedTest.java @@ -80,7 +80,7 @@ static protected DocumentCollection getCollectionDefinition(boolean enableFullFi if (enableFullFidelity) { collectionDefinition.setChangeFeedPolicy( - ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(10))); + ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(10))); } return collectionDefinition; @@ -197,7 +197,7 @@ private void changeFeed_withUpdatesAndDelete(boolean enableFullFidelityChangeFee .createForProcessingFromNow(feedRange); if (enableFullFidelityChangeFeedMode) { - changeFeedOption = changeFeedOption.fullFidelity(); + changeFeedOption = 
changeFeedOption.allVersionsAndDeletes(); } List> changeFeedResultsList = client diff --git a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/FullFidelityChangeFeedProcessorTest.java b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/FullFidelityChangeFeedProcessorTest.java new file mode 100644 index 0000000000000..350f40e365689 --- /dev/null +++ b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/FullFidelityChangeFeedProcessorTest.java @@ -0,0 +1,993 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.cosmos.rx; + +import com.azure.cosmos.ChangeFeedProcessor; +import com.azure.cosmos.ChangeFeedProcessorBuilder; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.implementation.InternalObjectNode; +import com.azure.cosmos.implementation.Utils; +import com.azure.cosmos.implementation.changefeed.fullfidelity.ServiceItemLeaseV1; +import com.azure.cosmos.models.ChangeFeedProcessorItem; +import com.azure.cosmos.models.ChangeFeedProcessorOptions; +import com.azure.cosmos.models.ChangeFeedProcessorState; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; +import com.azure.cosmos.models.CosmosItemRequestOptions; +import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.CosmosQueryRequestOptions; +import com.azure.cosmos.models.PartitionKey; +import com.azure.cosmos.models.SqlParameter; +import com.azure.cosmos.models.SqlQuerySpec; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.commons.lang3.RandomStringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Schedulers; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Consumer; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +public class FullFidelityChangeFeedProcessorTest extends TestSuiteBase { + private final static Logger log = LoggerFactory.getLogger(FullFidelityChangeFeedProcessorTest.class); + private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper(); + + private CosmosAsyncDatabase createdDatabase; + private final String hostName = RandomStringUtils.randomAlphabetic(6); + private final int FEED_COUNT = 10; + private final int CHANGE_FEED_PROCESSOR_TIMEOUT = 5000; + private final int FEED_COLLECTION_THROUGHPUT = 400; + private final int LEASE_COLLECTION_THROUGHPUT = 400; + + private CosmosAsyncClient client; + + @Factory(dataProvider = "clientBuilders") + public FullFidelityChangeFeedProcessorTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + // Using this test to verify basic functionality + @Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT) + public void 
fullFidelityChangeFeedProcessorStartFromNow() throws InterruptedException { + CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT); + CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT); + + try { + List createdDocuments = new ArrayList<>(); + Map receivedDocuments = new ConcurrentHashMap<>(); + ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions(); + ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder() + .options(changeFeedProcessorOptions) + .hostName(hostName) + .handleAllVersionsAndDeletesChanges((List docs) -> { + log.info("START processing from thread {}", Thread.currentThread().getId()); + for (ChangeFeedProcessorItem item : docs) { + processItem(item, receivedDocuments); + } + log.info("END processing from thread {}", Thread.currentThread().getId()); + }) + .feedContainer(createdFeedCollection) + .leaseContainer(createdLeaseCollection) + .buildChangeFeedProcessor(); + + try { + changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic()) + .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + .subscribe(); + logger.info("Starting ChangeFeed processor"); + + // Wait for the feed processor to receive and process the documents. + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + logger.info("Finished starting ChangeFeed processor"); + + setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT); + logger.info("Set up read feed documents"); + + // Wait for the feed processor to receive and process the documents. + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + logger.info("Validating changes now"); + + validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); + + // Wait for the feed processor to shut down. + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + } catch (Exception ex) { + log.error("Change feed processor did not start and stopped in the expected time", ex); + throw ex; + } + + } finally { + safeDeleteCollection(createdFeedCollection); + safeDeleteCollection(createdLeaseCollection); + // Allow some time for the collections to be deleted before exiting. 
+ Thread.sleep(500); + } + } + + + // Using this test to verify basic functionality + @Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT) + public void fullFidelityChangeFeedProcessorStartFromContinuationToken() throws InterruptedException { + CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT); + CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT); + + try { + List createdDocuments = new ArrayList<>(); + Map receivedDocuments = new ConcurrentHashMap<>(); + ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions(); + ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder() + .options(changeFeedProcessorOptions) + .hostName(hostName) + .handleAllVersionsAndDeletesChanges((List docs) -> { + log.info("START processing from thread {}", Thread.currentThread().getId()); + for (ChangeFeedProcessorItem item : docs) { + processItem(item, receivedDocuments); + } + log.info("END processing from thread {}", Thread.currentThread().getId()); + }) + .feedContainer(createdFeedCollection) + .leaseContainer(createdLeaseCollection) + .buildChangeFeedProcessor(); + + try { + changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic()) + .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + .subscribe(); + logger.info("Starting ChangeFeed processor"); + + // Wait for the feed processor to receive and process the documents. + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + logger.info("Finished starting ChangeFeed processor"); + + setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT); + logger.info("Set up read feed documents"); + + // Wait for the feed processor to receive and process the documents. + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + logger.info("Validating changes now"); + + validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); + + // Wait for the feed processor to shut down. + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + } catch (Exception ex) { + log.error("Change feed processor did not start and stopped in the expected time", ex); + throw ex; + } + + } finally { + safeDeleteCollection(createdFeedCollection); + safeDeleteCollection(createdLeaseCollection); + // Allow some time for the collections to be deleted before exiting. 
+ Thread.sleep(500); + } + } + + @Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false) + public void getCurrentState() throws InterruptedException { + CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT); + CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT); + + try { + List createdDocuments = new ArrayList<>(); + Map receivedDocuments = new ConcurrentHashMap<>(); + ChangeFeedProcessor changeFeedProcessorMain = new ChangeFeedProcessorBuilder() + .hostName(hostName) + .handleAllVersionsAndDeletesChanges((List docs) -> { + log.info("START processing from thread {}", Thread.currentThread().getId()); + for (ChangeFeedProcessorItem item : docs) { + processItem(item, receivedDocuments); + } + log.info("END processing from thread {}", Thread.currentThread().getId()); + }) + .feedContainer(createdFeedCollection) + .leaseContainer(createdLeaseCollection) + .buildChangeFeedProcessor(); + + ChangeFeedProcessor changeFeedProcessorSideCart = new ChangeFeedProcessorBuilder() + .hostName("side-cart") + .handleAllVersionsAndDeletesChanges((List docs) -> { + fail("ERROR - we should not execute this handler"); + }) + .feedContainer(createdFeedCollection) + .leaseContainer(createdLeaseCollection) + .buildChangeFeedProcessor(); + + try { + changeFeedProcessorMain.start().subscribeOn(Schedulers.boundedElastic()) + .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + .then(Mono.just(changeFeedProcessorMain) + .delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + .flatMap(value -> changeFeedProcessorMain.stop() + .subscribeOn(Schedulers.boundedElastic()) + .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + )) + .subscribe(); + } catch (Exception ex) { + log.error("Change feed processor did not start and stopped in the expected time", ex); + throw ex; + } + + Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + // Test for "zero" lag + List cfpCurrentState = changeFeedProcessorMain.getCurrentState() + .map(state -> { + try { + log.info(OBJECT_MAPPER.writeValueAsString(state)); + } catch (JsonProcessingException ex) { + log.error("Unexpected", ex); + } + return state; + }).block(); + + assertThat(cfpCurrentState.size()).isNotZero().as("Change Feed Processor number of leases should not be 0."); + + int totalLag = 0; + for (ChangeFeedProcessorState item : cfpCurrentState) { + totalLag += item.getEstimatedLag(); + } + + assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Main estimated total lag at start"); + + // check the side cart CFP instance + List cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState() + .map(state -> { + try { + log.info(OBJECT_MAPPER.writeValueAsString(state)); + } catch (JsonProcessingException ex) { + log.error("Unexpected", ex); + } + return state; + }).block(); + + assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0."); + + totalLag = 0; + for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) { + totalLag += item.getEstimatedLag(); + } + + assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Side Cart estimated total lag at start"); + + + // Test for "FEED_COUNT total lag + setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT); + + cfpCurrentState = changeFeedProcessorMain.getCurrentState() + .map(state -> { + try { + log.info(OBJECT_MAPPER.writeValueAsString(state)); + } 
catch (JsonProcessingException ex) { + log.error("Unexpected", ex); + } + return state; + }).block(); + + totalLag = 0; + for (ChangeFeedProcessorState item : cfpCurrentState) { + totalLag += item.getEstimatedLag(); + } + + assertThat(totalLag).isEqualTo(FEED_COUNT).as("Change Feed Processor Main estimated total lag"); + + // check the side cart CFP instance + cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState() + .map(state -> { + try { + log.info(OBJECT_MAPPER.writeValueAsString(state)); + } catch (JsonProcessingException ex) { + log.error("Unexpected", ex); + } + return state; + }).block(); + + assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0."); + + totalLag = 0; + for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) { + totalLag += item.getEstimatedLag(); + } + + assertThat(totalLag).isEqualTo(FEED_COUNT).as("Change Feed Processor Side Cart estimated total lag"); + + + } finally { + safeDeleteCollection(createdFeedCollection); + safeDeleteCollection(createdLeaseCollection); + + // Allow some time for the collections to be deleted before exiting. + Thread.sleep(500); + } + } + + @Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false) + public void getCurrentStateWithInsertedDocuments() throws InterruptedException { + CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT); + CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT); + + try { + List createdDocuments = new ArrayList<>(); + Map receivedDocuments = new ConcurrentHashMap<>(); + ChangeFeedProcessor changeFeedProcessorMain = new ChangeFeedProcessorBuilder() + .hostName(hostName) + .handleAllVersionsAndDeletesChanges((List docs) -> { + log.info("START processing from thread {}", Thread.currentThread().getId()); + for (ChangeFeedProcessorItem item : docs) { + processItem(item, receivedDocuments); + } + log.info("END processing from thread {}", Thread.currentThread().getId()); + }) + .feedContainer(createdFeedCollection) + .leaseContainer(createdLeaseCollection) + .buildChangeFeedProcessor(); + + ChangeFeedProcessor changeFeedProcessorSideCart = new ChangeFeedProcessorBuilder() + .hostName("side-cart") + .handleAllVersionsAndDeletesChanges((List docs) -> { + fail("ERROR - we should not execute this handler"); + }) + .feedContainer(createdFeedCollection) + .leaseContainer(createdLeaseCollection) + .buildChangeFeedProcessor(); + + try { + changeFeedProcessorMain.start().subscribeOn(Schedulers.boundedElastic()) + .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + .subscribe(); + } catch (Exception ex) { + log.error("Change feed processor did not start and stopped in the expected time", ex); + throw ex; + } + + Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + // Test for "zero" lag + List cfpCurrentState = changeFeedProcessorMain.getCurrentState() + .map(state -> { + try { + log.info(OBJECT_MAPPER.writeValueAsString(state)); + } catch (JsonProcessingException ex) { + log.error("Unexpected", ex); + } + return state; + }).block(); + + assertThat(cfpCurrentState.size()).isNotZero().as("Change Feed Processor number of leases should not be 0."); + + int totalLag = 0; + for (ChangeFeedProcessorState item : cfpCurrentState) { + totalLag += item.getEstimatedLag(); + } + + assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Main estimated total lag at start"); + + // check the side cart CFP instance + 
List cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState() + .map(state -> { + try { + log.info(OBJECT_MAPPER.writeValueAsString(state)); + } catch (JsonProcessingException ex) { + log.error("Unexpected", ex); + } + return state; + }).block(); + + assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0."); + + totalLag = 0; + for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) { + totalLag += item.getEstimatedLag(); + } + + assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Side Cart estimated total lag at start"); + + + // Test for "FEED_COUNT total lag + setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT); + + // Waiting for change feed processor to process documents + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + cfpCurrentState = changeFeedProcessorMain.getCurrentState() + .map(state -> { + try { + log.info("Current state of main after inserting documents is : {}", + OBJECT_MAPPER.writeValueAsString(state)); + } catch (JsonProcessingException ex) { + log.error("Unexpected", ex); + } + return state; + }).block(); + + totalLag = 0; + for (ChangeFeedProcessorState item : cfpCurrentState) { + totalLag += item.getEstimatedLag(); + } + + assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Main estimated total lag"); + + // check the side cart CFP instance + cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState() + .map(state -> { + try { + log.info("Current state of side cart after inserting documents is : {}", + OBJECT_MAPPER.writeValueAsString(state)); + } catch (JsonProcessingException ex) { + log.error("Unexpected", ex); + } + return state; + }).block(); + + assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0."); + + totalLag = 0; + for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) { + totalLag += item.getEstimatedLag(); + } + + assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Side Cart estimated total lag"); + + changeFeedProcessorMain.stop().subscribe(); + + // Waiting for change feed processor to stop + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + // Test for "FEED_COUNT total lag + setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT); + + cfpCurrentState = changeFeedProcessorMain.getCurrentState() + .map(state -> { + try { + log.info("Current state of main after stopping is : {}", + OBJECT_MAPPER.writeValueAsString(state)); + } catch (JsonProcessingException ex) { + log.error("Unexpected", ex); + } + return state; + }).block(); + + totalLag = 0; + for (ChangeFeedProcessorState item : cfpCurrentState) { + totalLag += item.getEstimatedLag(); + } + + assertThat(totalLag).isEqualTo(FEED_COUNT).as("Change Feed Processor Main estimated total lag"); + + // check the side cart CFP instance + cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState() + .map(state -> { + try { + log.info("Current state of side cart after stopping is : {}", + OBJECT_MAPPER.writeValueAsString(state)); + } catch (JsonProcessingException ex) { + log.error("Unexpected", ex); + } + return state; + }).block(); + + assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0."); + + totalLag = 0; + for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) { + totalLag += item.getEstimatedLag(); + } + + 
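+            // The main processor has already been stopped, so the most recently inserted batch stays unprocessed and the side-cart view of the lease container should also report a lag of FEED_COUNT.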
assertThat(totalLag).isEqualTo(FEED_COUNT).as("Change Feed Processor Side Cart estimated total lag"); + + + } finally { + safeDeleteCollection(createdFeedCollection); + safeDeleteCollection(createdLeaseCollection); + + // Allow some time for the collections to be deleted before exiting. + Thread.sleep(500); + } + } + + @Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false) + public void staledLeaseAcquiring() throws InterruptedException { + final String ownerFirst = "Owner_First"; + final String ownerSecond = "Owner_Second"; + final String leasePrefix = "TEST"; + CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT); + CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT); + + try { + Map receivedDocuments = new ConcurrentHashMap<>(); + + ChangeFeedProcessor changeFeedProcessorFirst = new ChangeFeedProcessorBuilder() + .hostName(ownerFirst) + .handleAllVersionsAndDeletesChanges(docs -> { + log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst); + log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst); + }) + .feedContainer(createdFeedCollection) + .leaseContainer(createdLeaseCollection) + .options(new ChangeFeedProcessorOptions() + .setLeasePrefix(leasePrefix) + ) + .buildChangeFeedProcessor(); + + ChangeFeedProcessor changeFeedProcessorSecond = new ChangeFeedProcessorBuilder() + .hostName(ownerSecond) + .handleAllVersionsAndDeletesChanges((List docs) -> { + log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond); + for (ChangeFeedProcessorItem item : docs) { + processItem(item, receivedDocuments); + } + log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond); + }) + .feedContainer(createdFeedCollection) + .leaseContainer(createdLeaseCollection) + .options(new ChangeFeedProcessorOptions() + .setLeaseRenewInterval(Duration.ofSeconds(10)) + .setLeaseAcquireInterval(Duration.ofSeconds(5)) + .setLeaseExpirationInterval(Duration.ofSeconds(20)) + .setFeedPollDelay(Duration.ofSeconds(2)) + .setLeasePrefix(leasePrefix) + .setMaxItemCount(10) + .setMaxScaleCount(0) // unlimited + ) + .buildChangeFeedProcessor(); + + changeFeedProcessorFirst + .start() + .subscribeOn(Schedulers.boundedElastic()) + .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + .then(Mono.just(changeFeedProcessorFirst) + .delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + .flatMap(value -> + changeFeedProcessorFirst.stop() + .subscribeOn(Schedulers.boundedElastic()) + .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + )) + .subscribe(); + + try { + // Wait for the feed processor to shut down. 
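+                // The reactive chain above schedules the first processor to stop after a short delay; this sleep gives it time to shut down before its leases are re-tagged with a temporary owner below.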
+ Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted exception", e); + } + log.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first"); + + SqlParameter param = new SqlParameter(); + param.setName("@PartitionLeasePrefix"); + param.setValue(leasePrefix); + SqlQuerySpec querySpec = new SqlQuerySpec( + "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Collections.singletonList(param)); + + CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); + + createdLeaseCollection + .queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage() + .flatMap(documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults())) + .flatMap(doc -> { + ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc); + leaseDocument.setOwner("TEMP_OWNER"); + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options) + .map(CosmosItemResponse::getItem); + }) + .map(leaseDocument -> { + log.info("QueryItems after Change feed processor processing; found host {}", leaseDocument.getOwner()); + return leaseDocument; + }) + .blockLast(); + + changeFeedProcessorSecond + .start() + .subscribeOn(Schedulers.boundedElastic()) + .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + .subscribe(); + + // Wait for the feed processor to start. + Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + List docDefList = new ArrayList<>(); + for (int i = 0; i < FEED_COUNT; i++) { + docDefList.add(getDocumentDefinition()); + } + + bulkInsert(createdFeedCollection, docDefList, FEED_COUNT).blockLast(); + + // Wait for the feed processor to receive and process the documents. + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + long remainingWork = 10 * CHANGE_FEED_PROCESSOR_TIMEOUT; + while (remainingWork > 0 && changeFeedProcessorFirst.isStarted() && !changeFeedProcessorSecond.isStarted()) { + remainingWork -= 100; + Thread.sleep(100); + } + assertThat(changeFeedProcessorSecond.isStarted()).as("Change Feed Processor instance is running").isTrue(); + + // Wait for the feed processor to receive and process the documents. + waitToReceiveDocuments(receivedDocuments, 30 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT); + + changeFeedProcessorSecond.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); + + // Wait for the feed processor to shut down. + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + logger.info("DONE"); + } finally { + safeDeleteCollection(createdFeedCollection); + safeDeleteCollection(createdLeaseCollection); + + // Allow some time for the collections to be deleted before exiting. 
+ Thread.sleep(500); + } + } + + @Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false) + public void ownerNullAcquiring() throws InterruptedException { + final String ownerFirst = "Owner_First"; + final String leasePrefix = "TEST"; + CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT); + CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT); + + try { + List createdDocuments = new ArrayList<>(); + Map receivedDocuments = new ConcurrentHashMap<>(); + + ChangeFeedProcessor changeFeedProcessorFirst = new ChangeFeedProcessorBuilder() + .hostName(ownerFirst) + .handleAllVersionsAndDeletesChanges(docs -> { + logger.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst); + for (ChangeFeedProcessorItem item : docs) { + try { + Thread.sleep(1000); + } catch (InterruptedException ignored) { + } + processItem(item, receivedDocuments); + } + logger.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst); + }) + .feedContainer(createdFeedCollection) + .leaseContainer(createdLeaseCollection) + .options(new ChangeFeedProcessorOptions() + .setLeasePrefix(leasePrefix) + .setLeaseRenewInterval(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + .setLeaseAcquireInterval(Duration.ofMillis(5 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + .setLeaseExpirationInterval(Duration.ofMillis(6 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + .setFeedPollDelay(Duration.ofSeconds(5)) + ) + .buildChangeFeedProcessor(); + + try { + logger.info("Start more creating documents"); + List docDefList = new ArrayList<>(); + + for (int i = 0; i < FEED_COUNT; i++) { + docDefList.add(getDocumentDefinition()); + } + + bulkInsert(createdFeedCollection, docDefList, FEED_COUNT) + .last() + .flatMap(cosmosItemResponse -> { + logger.info("Start first Change feed processor"); + return changeFeedProcessorFirst.start().subscribeOn(Schedulers.boundedElastic()) + .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)); + }) + .then( + Mono.just(changeFeedProcessorFirst) + .flatMap( value -> { + logger.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first"); + try { + Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT); + } catch (InterruptedException ignored) { + } + + logger.info("QueryItems before Change feed processor processing"); + + SqlParameter param1 = new SqlParameter(); + param1.setName("@PartitionLeasePrefix"); + param1.setValue(leasePrefix); + SqlParameter param2 = new SqlParameter(); + param2.setName("@Owner"); + param2.setValue(ownerFirst); + + SqlQuerySpec querySpec = new SqlQuerySpec( + "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix) AND c.Owner=@Owner", Arrays.asList(param1, param2)); + + CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); + + return createdLeaseCollection.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage() + .flatMap(documentFeedResponse -> reactor.core.publisher.Flux.fromIterable(documentFeedResponse.getResults())) + .flatMap(doc -> { + ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc); + leaseDocument.setOwner(null); + CosmosItemRequestOptions options = new CosmosItemRequestOptions(); + return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options) + .map(CosmosItemResponse::getItem); + }) + 
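+                                // Clearing the owner above simulates a lease that was dropped without being released; the test expects the already running processor to re-acquire it and keep processing new documents.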
.map(leaseDocument -> { + logger.info("QueryItems after Change feed processor processing; current Owner is'{}'", leaseDocument.getOwner()); + return leaseDocument; + }) + .last() + .flatMap(leaseDocument -> { + logger.info("Start creating more documents"); + List docDefList1 = new ArrayList<>(); + + for (int i = 0; i < FEED_COUNT; i++) { + docDefList1.add(getDocumentDefinition()); + } + + return bulkInsert(createdFeedCollection, docDefList1, FEED_COUNT) + .last(); + }); + })) + .subscribe(); + } catch (Exception ex) { + log.error("First change feed processor did not start in the expected time", ex); + throw ex; + } + + long remainingWork = 20 * CHANGE_FEED_PROCESSOR_TIMEOUT; + while (remainingWork > 0 && !changeFeedProcessorFirst.isStarted()) { + remainingWork -= 100; + Thread.sleep(100); + } + + // Wait for the feed processor to receive and process the documents. + waitToReceiveDocuments(receivedDocuments, 30 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT); + + assertThat(changeFeedProcessorFirst.isStarted()).as("Change Feed Processor instance is running").isTrue(); + + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + changeFeedProcessorFirst.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe(); + + // Wait for the feed processor to shutdown. + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + + } finally { + safeDeleteCollection(createdFeedCollection); + safeDeleteCollection(createdLeaseCollection); + + // Allow some time for the collections to be deleted before exiting. + Thread.sleep(500); + } + } + + @Test(groups = { "emulator" }, timeOut = 20 * TIMEOUT, enabled = false) + public void inactiveOwnersRecovery() throws InterruptedException { + CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT); + CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT); + + try { + List createdDocuments = new ArrayList<>(); + Map receivedDocuments = new ConcurrentHashMap<>(); + String leasePrefix = "TEST"; + + ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder() + .hostName(hostName) + .handleAllVersionsAndDeletesChanges(fullFidelityChangeFeedProcessorHandler(receivedDocuments)) + .feedContainer(createdFeedCollection) + .leaseContainer(createdLeaseCollection) + .options(new ChangeFeedProcessorOptions() + .setLeaseRenewInterval(Duration.ofSeconds(1)) + .setLeaseAcquireInterval(Duration.ofSeconds(1)) + .setLeaseExpirationInterval(Duration.ofSeconds(5)) + .setFeedPollDelay(Duration.ofSeconds(1)) + .setLeasePrefix(leasePrefix) + .setMaxItemCount(100) + .setMaxScaleCount(0) // unlimited + //.setScheduler(Schedulers.boundedElastic()) + .setScheduler(Schedulers.newParallel("CFP parallel", + 10 * Schedulers.DEFAULT_POOL_SIZE, + true)) + ) + .buildChangeFeedProcessor(); + + try { + changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic()) + .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)) + .subscribe(); + + // Wait for the feed processor to receive and process the documents. + Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT); + } catch (Exception ex) { + log.error("Change feed processor did not start in the expected time", ex); + throw ex; + } + + setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT); + + // Wait for the feed processor to receive and process the documents. 
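+            // This test relies on the short lease expiration configured above: once the lease owners are overwritten with random names below, the leases quickly go stale and the processor is expected to take them back.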
+            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
+
+            validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
+
+            Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
+
+            log.info("Update leases with random owners");
+
+            SqlParameter param1 = new SqlParameter();
+            param1.setName("@PartitionLeasePrefix");
+            param1.setValue(leasePrefix);
+
+            SqlQuerySpec querySpec = new SqlQuerySpec(
+                "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Arrays.asList(param1));
+
+            CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
+
+            createdLeaseCollection.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage()
+                .flatMap(documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults()))
+                .flatMap(doc -> {
+                    ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc);
+                    leaseDocument.setOwner(RandomStringUtils.randomAlphabetic(10));
+                    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
+                    return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options)
+                        .map(CosmosItemResponse::getItem);
+                })
+                .flatMap(leaseDocument -> createdLeaseCollection.readItem(leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), InternalObjectNode.class))
+                .map(doc -> {
+                    ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc.getItem());
+                    log.info("Change feed processor current Owner is '{}'", leaseDocument.getOwner());
+                    return leaseDocument;
+                })
+                .blockLast();
+
+            createdDocuments.clear();
+            receivedDocuments.clear();
+            setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
+
+            // Wait for the feed processor to receive and process the documents.
+            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
+            validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
+
+            // Wait for the feed processor to shut down.
+            Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
+        } finally {
+            safeDeleteCollection(createdFeedCollection);
+            safeDeleteCollection(createdLeaseCollection);
+
+            // Allow some time for the collections to be deleted before exiting.
+            Thread.sleep(500);
+        }
+    }
+
+    void validateChangeFeedProcessing(ChangeFeedProcessor changeFeedProcessor, List<InternalObjectNode> createdDocuments, Map<String, ChangeFeedProcessorItem> receivedDocuments, int sleepTime) throws InterruptedException {
+        assertThat(changeFeedProcessor.isStarted()).as("Change Feed Processor instance is running").isTrue();
+
+        List<ChangeFeedProcessorState> cfpCurrentState = changeFeedProcessor
+            .getCurrentState()
+            .map(state -> {
+                try {
+                    log.info(OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(state));
+                } catch (JsonProcessingException ex) {
+                    log.error("Unexpected", ex);
+                }
+                return state;
+            })
+            .block();
+
+        assertThat(cfpCurrentState).isNotNull().as("Change Feed Processor current state");
+
+        for (ChangeFeedProcessorState item : cfpCurrentState) {
+            assertThat(item.getHostName()).isEqualTo(hostName).as("Change Feed Processor ownership");
+        }
+
+        // Verify the handler received the expected number of documents before checking the individual ids.
+        assertThat(receivedDocuments.size()).isEqualTo(FEED_COUNT);
+
+        for (InternalObjectNode item : createdDocuments) {
+            assertThat(receivedDocuments.containsKey(item.getId())).as("Document with id: " + item.getId()).isTrue();
+        }
+    }
+
+    private Consumer<List<ChangeFeedProcessorItem>> fullFidelityChangeFeedProcessorHandler(Map<String, ChangeFeedProcessorItem> receivedDocuments) {
+        return docs -> {
+            log.info("START processing from thread in test {}", Thread.currentThread().getId());
+            for (ChangeFeedProcessorItem item : docs) {
+                processItem(item, receivedDocuments);
+            }
+            log.info("END processing from thread {}", Thread.currentThread().getId());
+        };
+    }
+
+    private void waitToReceiveDocuments(Map<String, ChangeFeedProcessorItem> receivedDocuments, long timeoutInMillisecond, long count) throws InterruptedException {
+        long remainingWork = timeoutInMillisecond;
+        while (remainingWork > 0 && receivedDocuments.size() < count) {
+            remainingWork -= 200;
+            Thread.sleep(200);
+        }
+
+        assertThat(remainingWork > 0).as("Failed to receive all the feed documents").isTrue();
+    }
+
+    @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT)
+    public void before_ChangeFeedProcessorTest() {
+        client = getClientBuilder().buildAsyncClient();
+        createdDatabase = getSharedCosmosDatabase(client);
+    }
+
+    @AfterClass(groups = { "emulator" }, timeOut = 2 * SHUTDOWN_TIMEOUT, alwaysRun = true)
+    public void afterClass() {
+        safeClose(client);
+    }
+
+    private void setupReadFeedDocuments(List<InternalObjectNode> createdDocuments, Map<String, ChangeFeedProcessorItem> receivedDocuments, CosmosAsyncContainer feedCollection, long count) {
+        List<InternalObjectNode> docDefList = new ArrayList<>();
+
+        for (int i = 0; i < count; i++) {
+            InternalObjectNode item = getDocumentDefinition();
+            docDefList.add(item);
+            logger.info("Adding the following item to bulk list: {}", item);
+        }
+
+        createdDocuments.addAll(bulkInsertBlocking(feedCollection, docDefList));
+        waitIfNeededForReplicasToCatchUp(getClientBuilder());
+    }
+
+    private void createReadFeedDocuments(List<InternalObjectNode> createdDocuments, CosmosAsyncContainer feedCollection, long count) {
+        List<InternalObjectNode> docDefList = new ArrayList<>();
+
+        for (int i = 0; i < count; i++) {
+            docDefList.add(getDocumentDefinition());
+        }
+
+        createdDocuments.addAll(bulkInsertBlocking(feedCollection, docDefList));
+        waitIfNeededForReplicasToCatchUp(getClientBuilder());
+    }
+
+    private InternalObjectNode getDocumentDefinition() {
+        String uuid = UUID.randomUUID().toString();
+        InternalObjectNode doc = new InternalObjectNode(String.format("{ "
+            + "\"id\": \"%s\", "
+            + "\"mypk\": \"%s\", "
+            + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+            + "}"
+            , uuid, uuid));
+        return doc;
+    }
+
+    private CosmosAsyncContainer
createFeedCollection(int provisionedThroughput) { + CosmosContainerRequestOptions optionsFeedCollection = new CosmosContainerRequestOptions(); + return createCollection(createdDatabase, getCollectionDefinitionWithFullFidelity(), optionsFeedCollection, provisionedThroughput); + } + + private CosmosAsyncContainer createLeaseCollection(int provisionedThroughput) { + CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); + CosmosContainerProperties collectionDefinition = new CosmosContainerProperties( + "leases_" + UUID.randomUUID(), + "/id"); + return createCollection(createdDatabase, collectionDefinition, options, provisionedThroughput); + } + + private static synchronized void processItem(ChangeFeedProcessorItem item, Map receivedDocuments) { + log.info("RECEIVED {}", item); + receivedDocuments.put(item.getCurrent().get("id").asText(), item); + } +} \ No newline at end of file diff --git a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/FullFidelityChangeFeedTest.java b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/FullFidelityChangeFeedTest.java index 461a07d2cc6a5..6557fea35c011 100644 --- a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/FullFidelityChangeFeedTest.java +++ b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/FullFidelityChangeFeedTest.java @@ -2,42 +2,29 @@ // Licensed under the MIT License. package com.azure.cosmos.rx; -import com.azure.cosmos.ConsistencyLevel; -import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncDatabase; import com.azure.cosmos.CosmosClientBuilder; -import com.azure.cosmos.CosmosContainer; -import com.azure.cosmos.CosmosDatabase; -import com.azure.cosmos.implementation.AsyncDocumentClient; -import com.azure.cosmos.implementation.Database; -import com.azure.cosmos.implementation.DocumentCollection; -import com.azure.cosmos.implementation.TestConfigurations; -import com.azure.cosmos.implementation.TestSuiteBase; -import com.azure.cosmos.implementation.TestUtils; import com.azure.cosmos.implementation.throughputControl.TestItem; import com.azure.cosmos.models.ChangeFeedPolicy; import com.azure.cosmos.models.CosmosChangeFeedRequestOptions; import com.azure.cosmos.models.CosmosContainerProperties; -import com.azure.cosmos.models.CosmosContainerResponse; -import com.azure.cosmos.models.CosmosDatabaseResponse; import com.azure.cosmos.models.CosmosItemRequestOptions; import com.azure.cosmos.models.FeedRange; import com.azure.cosmos.models.FeedResponse; import com.azure.cosmos.models.PartitionKey; -import com.azure.cosmos.models.PartitionKeyDefinition; import com.fasterxml.jackson.databind.JsonNode; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; import org.testng.annotations.Test; -import java.lang.annotation.Retention; -import java.lang.annotation.Target; import java.time.Duration; -import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.UUID; -import static java.lang.annotation.ElementType.METHOD; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; @@ -45,423 +32,389 @@ public class FullFidelityChangeFeedTest extends TestSuiteBase { private static final int SETUP_TIMEOUT = 40000; private static final int TIMEOUT = 30000; - private static final String PartitionKeyFieldName = "mypk"; - private Database 
createdDatabase; - private DocumentCollection createdCollection; + private CosmosAsyncDatabase createdDatabase; + private CosmosAsyncClient client; - private AsyncDocumentClient client; - - public String getCollectionLink() { - return TestUtils.getCollectionNameLink(createdDatabase.getId(), createdCollection.getId()); - } - - public FullFidelityChangeFeedTest() { - super(createGatewayRxDocumentClient()); - subscriberValidationTimeout = TIMEOUT; + @Factory(dataProvider = "simpleClientBuildersWithDirectTcp") + public FullFidelityChangeFeedTest(CosmosClientBuilder cosmosClientBuilder) { + super(cosmosClientBuilder); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void fullFidelityChangeFeed_FromNowForLogicalPartition() throws Exception { - CosmosContainer cosmosContainer = initializeFFCFContainer(2); - CosmosChangeFeedRequestOptions options1 = CosmosChangeFeedRequestOptions - .createForProcessingFromNow(FeedRange.forLogicalPartition(new PartitionKey("mypk-1"))); - options1.fullFidelity(); - - Iterator> results1 = cosmosContainer - .queryChangeFeed(options1, JsonNode.class) - .iterableByPage() - .iterator(); - - String continuationToken1 = ""; - while (results1.hasNext()) { - FeedResponse response = results1.next(); - continuationToken1 = response.getContinuationToken(); - } - - TestItem item1 = new TestItem( - UUID.randomUUID().toString(), - "mypk-1", "Johnson"); - TestItem item2 = new TestItem( - UUID.randomUUID().toString(), - "mypk-1", "Smith"); - TestItem item3 = new TestItem( - UUID.randomUUID().toString(), - "mypk-2", "John"); - cosmosContainer.createItem(item1); - cosmosContainer.createItem(item2); - String originalLastNameItem1 = item1.getProp(); - item1.setProp("Gates"); - cosmosContainer.upsertItem(item1); - String originalLastNameItem2 = item2.getProp(); - item2.setProp("Doe"); - cosmosContainer.upsertItem(item2); - cosmosContainer.deleteItem(item1, new CosmosItemRequestOptions()); - - options1 = CosmosChangeFeedRequestOptions - .createForProcessingFromContinuation(continuationToken1); - options1.fullFidelity(); - - results1 = cosmosContainer - .queryChangeFeed(options1, JsonNode.class) - .iterableByPage() - .iterator(); - - // Check item2 deleted with TTL - // TODO: this is not working - item does get deleted but it won't show up in CF - logger.info("{} going to sleep for 5 seconds to populate ttl delete", Thread.currentThread().getName()); - Thread.sleep(5 * 1000); - - if (results1.hasNext()) { - FeedResponse response = results1.next(); - List itemChanges = response.getResults(); - assertGatewayMode(response); - assertThat(itemChanges.size()).isEqualTo(5); - // Assert initial creation of items - assertThat(itemChanges.get(0).get("current").get("id").asText()).isEqualTo(item1.getId()); - assertThat(itemChanges.get(0).get("current").get("prop").asText()).isEqualTo(originalLastNameItem1); - assertThat(itemChanges.get(0).get("metadata").get("operationType").asText()).isEqualTo("create"); - assertThat(itemChanges.get(1).get("current").get("id").asText()).isEqualTo(item2.getId()); - assertThat(itemChanges.get(1).get("current").get("prop").asText()).isEqualTo(originalLastNameItem2); - assertThat(itemChanges.get(1).get("metadata").get("operationType").asText()).isEqualTo("create"); - // Assert replace of item1 - assertThat(itemChanges.get(2).get("current").get("id").asText()).isEqualTo(item1.getId()); - assertThat(itemChanges.get(2).get("current").get("prop").asText()).isEqualTo(item1.getProp()); - 
assertThat(itemChanges.get(2).get("metadata").get("operationType").asText()).isEqualTo("replace"); - if (itemChanges.get(2).get("previous") != null) { - assertThat(itemChanges.get(2).get("previous")).isEqualTo(itemChanges.get(0).get("current")); + CosmosAsyncContainer cosmosContainer = initializeFFCFContainer(); + try { + CosmosChangeFeedRequestOptions options1 = CosmosChangeFeedRequestOptions + .createForProcessingFromNow(FeedRange.forLogicalPartition(new PartitionKey("mypk-1"))); + options1.allVersionsAndDeletes(); + + Iterator> results1 = cosmosContainer + .queryChangeFeed(options1, JsonNode.class) + .byPage() + .toIterable() + .iterator(); + + String continuationToken1 = ""; + while (results1.hasNext()) { + FeedResponse response = results1.next(); + continuationToken1 = response.getContinuationToken(); } - // Assert replace of item2 - assertThat(itemChanges.get(3).get("current").get("id").asText()).isEqualTo(item2.getId()); - assertThat(itemChanges.get(3).get("current").get("prop").asText()).isEqualTo(item2.getProp()); - assertThat(itemChanges.get(3).get("metadata").get("operationType").asText()).isEqualTo("replace"); - if (itemChanges.get(3).get("previous") != null) { - assertThat(itemChanges.get(3).get("previous")).isEqualTo(itemChanges.get(1).get("current")); + + TestItem item1 = new TestItem( + UUID.randomUUID().toString(), + "mypk-1", "Johnson"); + TestItem item2 = new TestItem( + UUID.randomUUID().toString(), + "mypk-1", "Smith"); + TestItem item3 = new TestItem( + UUID.randomUUID().toString(), + "mypk-2", "John"); + cosmosContainer.createItem(item1).block(); + cosmosContainer.createItem(item2).block(); + String originalLastNameItem1 = item1.getProp(); + item1.setProp("Gates"); + cosmosContainer.upsertItem(item1).block(); + String originalLastNameItem2 = item2.getProp(); + item2.setProp("Doe"); + cosmosContainer.upsertItem(item2).block(); + cosmosContainer.deleteItem(item1, new CosmosItemRequestOptions()).block(); + + options1 = CosmosChangeFeedRequestOptions + .createForProcessingFromContinuation(continuationToken1); + options1.allVersionsAndDeletes(); + + results1 = cosmosContainer + .queryChangeFeed(options1, JsonNode.class) + .byPage() + .toIterable() + .iterator(); + + if (results1.hasNext()) { + FeedResponse response = results1.next(); + List itemChanges = response.getResults(); + assertThat(itemChanges.size()).isEqualTo(5); + // Assert initial creation of items + assertThat(itemChanges.get(0).get("current").get("id").asText()).isEqualTo(item1.getId()); + assertThat(itemChanges.get(0).get("current").get("prop").asText()).isEqualTo(originalLastNameItem1); + assertThat(itemChanges.get(0).get("metadata").get("operationType").asText()).isEqualTo("create"); + assertThat(itemChanges.get(1).get("current").get("id").asText()).isEqualTo(item2.getId()); + assertThat(itemChanges.get(1).get("current").get("prop").asText()).isEqualTo(originalLastNameItem2); + assertThat(itemChanges.get(1).get("metadata").get("operationType").asText()).isEqualTo("create"); + // Assert replace of item1 + assertThat(itemChanges.get(2).get("current").get("id").asText()).isEqualTo(item1.getId()); + assertThat(itemChanges.get(2).get("current").get("prop").asText()).isEqualTo(item1.getProp()); + assertThat(itemChanges.get(2).get("metadata").get("operationType").asText()).isEqualTo("replace"); + if (itemChanges.get(2).get("previous") != null) { + assertThat(itemChanges.get(2).get("previous")).isEqualTo(itemChanges.get(0).get("current")); + } + // Assert replace of item2 + 
assertThat(itemChanges.get(3).get("current").get("id").asText()).isEqualTo(item2.getId()); + assertThat(itemChanges.get(3).get("current").get("prop").asText()).isEqualTo(item2.getProp()); + assertThat(itemChanges.get(3).get("metadata").get("operationType").asText()).isEqualTo("replace"); + if (itemChanges.get(3).get("previous") != null) { + assertThat(itemChanges.get(3).get("previous")).isEqualTo(itemChanges.get(1).get("current")); + } + // Assert delete of item1 + assertThat(itemChanges.get(4).get("previous").get("id").asText()).isEqualTo(item1.getId()); + assertThat(itemChanges.get(4).get("current")).isEmpty(); + assertThat(itemChanges.get(4).get("metadata").get("operationType").asText()).isEqualTo("delete"); + assertThat(itemChanges.get(4).get("metadata").get("previousImageLSN").asText() + ).isEqualTo(itemChanges.get(2).get("metadata").get("lsn").asText()); + } else { + fail("change feed missing results"); } - // Assert delete of item1 - assertThat(itemChanges.get(4).get("previous").get("id").asText()).isEqualTo(item1.getId()); - assertThat(itemChanges.get(4).get("current")).isEmpty(); - assertThat(itemChanges.get(4).get("metadata").get("operationType").asText()).isEqualTo("delete"); - assertThat(itemChanges.get(4).get("metadata").get("previousImageLSN").asText() - ).isEqualTo(itemChanges.get(2).get("metadata").get("lsn").asText()); - // Assert item2 deleted with TTL - // TODO: Missing TTL logic - } else { - fail("change feed missing results"); - } - CosmosChangeFeedRequestOptions options2 = CosmosChangeFeedRequestOptions - .createForProcessingFromNow(FeedRange.forLogicalPartition(new PartitionKey("mypk-2"))); - options2.fullFidelity(); + CosmosChangeFeedRequestOptions options2 = CosmosChangeFeedRequestOptions + .createForProcessingFromNow(FeedRange.forLogicalPartition(new PartitionKey("mypk-2"))); + options2.allVersionsAndDeletes(); - Iterator> results2 = cosmosContainer - .queryChangeFeed(options2, JsonNode.class) - .iterableByPage() - .iterator(); + Iterator> results2 = cosmosContainer + .queryChangeFeed(options2, JsonNode.class) + .byPage() + .toIterable() + .iterator(); - String continuationToken2 = ""; - while (results2.hasNext()) { - FeedResponse response = results2.next(); - continuationToken2 = response.getContinuationToken(); - } + String continuationToken2 = ""; + while (results2.hasNext()) { + FeedResponse response = results2.next(); + continuationToken2 = response.getContinuationToken(); + } - cosmosContainer.createItem(item3); - String originalLastNameItem3 = item3.getProp(); - item3.setProp("Potter"); - cosmosContainer.upsertItem(item3); - cosmosContainer.deleteItem(item3, new CosmosItemRequestOptions()); - - options2 = CosmosChangeFeedRequestOptions - .createForProcessingFromContinuation(continuationToken2); - options2.fullFidelity(); - - results2 = cosmosContainer - .queryChangeFeed(options2, JsonNode.class) - .iterableByPage() - .iterator(); - - if (results2.hasNext()) { - FeedResponse response = results2.next(); - List itemChanges = response.getResults(); - assertGatewayMode(response); - assertThat(itemChanges.size()).isEqualTo(3); - // Assert initial creation of item3 - assertThat(itemChanges.get(0).get("current").get("id").asText()).isEqualTo(item3.getId()); - assertThat(itemChanges.get(0).get("current").get("prop").asText()).isEqualTo(originalLastNameItem3); - assertThat(itemChanges.get(0).get("metadata").get("operationType").asText()).isEqualTo("create"); - // Assert replace of item3 - 
assertThat(itemChanges.get(1).get("current").get("id").asText()).isEqualTo(item3.getId()); - assertThat(itemChanges.get(1).get("current").get("prop").asText()).isEqualTo(item3.getProp()); - assertThat(itemChanges.get(1).get("metadata").get("operationType").asText()).isEqualTo("replace"); - if (itemChanges.get(1).get("previous") != null) { - assertThat(itemChanges.get(1).get("previous")).isEqualTo(itemChanges.get(0).get("current")); + cosmosContainer.createItem(item3).block(); + String originalLastNameItem3 = item3.getProp(); + item3.setProp("Potter"); + cosmosContainer.upsertItem(item3).block(); + cosmosContainer.deleteItem(item3, new CosmosItemRequestOptions()).block(); + + options2 = CosmosChangeFeedRequestOptions + .createForProcessingFromContinuation(continuationToken2); + options2.allVersionsAndDeletes(); + + results2 = cosmosContainer + .queryChangeFeed(options2, JsonNode.class) + .byPage() + .toIterable() + .iterator(); + + if (results2.hasNext()) { + FeedResponse response = results2.next(); + List itemChanges = response.getResults(); + assertThat(itemChanges.size()).isEqualTo(3); + // Assert initial creation of item3 + assertThat(itemChanges.get(0).get("current").get("id").asText()).isEqualTo(item3.getId()); + assertThat(itemChanges.get(0).get("current").get("prop").asText()).isEqualTo(originalLastNameItem3); + assertThat(itemChanges.get(0).get("metadata").get("operationType").asText()).isEqualTo("create"); + // Assert replace of item3 + assertThat(itemChanges.get(1).get("current").get("id").asText()).isEqualTo(item3.getId()); + assertThat(itemChanges.get(1).get("current").get("prop").asText()).isEqualTo(item3.getProp()); + assertThat(itemChanges.get(1).get("metadata").get("operationType").asText()).isEqualTo("replace"); + if (itemChanges.get(1).get("previous") != null) { + assertThat(itemChanges.get(1).get("previous")).isEqualTo(itemChanges.get(0).get("current")); + } + // Assert delete of item3 + assertThat(itemChanges.get(2).get("previous").get("id").asText()).isEqualTo(item3.getId()); + assertThat(itemChanges.get(2).get("current")).isEmpty(); + assertThat(itemChanges.get(2).get("metadata").get("operationType").asText()).isEqualTo("delete"); + assertThat(itemChanges.get(2).get("metadata").get("previousImageLSN").asText() + ).isEqualTo(itemChanges.get(1).get("metadata").get("lsn").asText()); + } else { + fail("change feed missing results"); } - // Assert delete of item3 - assertThat(itemChanges.get(2).get("previous").get("id").asText()).isEqualTo(item3.getId()); - assertThat(itemChanges.get(2).get("current")).isEmpty(); - assertThat(itemChanges.get(2).get("metadata").get("operationType").asText()).isEqualTo("delete"); - assertThat(itemChanges.get(2).get("metadata").get("previousImageLSN").asText() - ).isEqualTo(itemChanges.get(1).get("metadata").get("lsn").asText()); - } else { - fail("change feed missing results"); + } finally { + safeDeleteCollection(cosmosContainer); } } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void fullFidelityChangeFeed_FromContinuationToken() throws Exception { - CosmosContainer cosmosContainer = initializeFFCFContainer(2); - CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions - .createForProcessingFromNow(FeedRange.forFullRange()); - options.fullFidelity(); - - Iterator> results = cosmosContainer - .queryChangeFeed(options, JsonNode.class) - .iterableByPage() - .iterator(); - - String continuationToken = null; - while (results.hasNext()) { - FeedResponse response = results.next(); - continuationToken = 
response.getContinuationToken(); - } + CosmosAsyncContainer cosmosContainer = initializeFFCFContainer(); + try { + CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions + .createForProcessingFromNow(FeedRange.forFullRange()); + options.allVersionsAndDeletes(); + + Iterator> results = cosmosContainer + .queryChangeFeed(options, JsonNode.class) + .byPage() + .toIterable() + .iterator(); + + String continuationToken = null; + while (results.hasNext()) { + FeedResponse response = results.next(); + continuationToken = response.getContinuationToken(); + } - options = CosmosChangeFeedRequestOptions - .createForProcessingFromContinuation(continuationToken); - options.fullFidelity(); - - TestItem item1 = new TestItem( - UUID.randomUUID().toString(), - "mypk", "Johnson"); - TestItem item2 = new TestItem( - UUID.randomUUID().toString(), - "mypk", "Smith"); - cosmosContainer.upsertItem(item1); - cosmosContainer.upsertItem(item2); - String originalLastName = item1.getProp(); - item1.setProp("Gates"); - cosmosContainer.upsertItem(item1); - cosmosContainer.deleteItem(item1, new CosmosItemRequestOptions()); - - // Check item2 deleted with TTL - // TODO: this is not working - item does get deleted but it won't show up in CF - logger.info("{} going to sleep for 5 seconds to populate ttl delete", Thread.currentThread().getName()); - Thread.sleep(5 * 1000); - - results = cosmosContainer - .queryChangeFeed(options, JsonNode.class) - .iterableByPage() - .iterator(); - - if (results.hasNext()) { - FeedResponse response = results.next(); - List itemChanges = response.getResults(); - assertGatewayMode(response); - assertThat(itemChanges.size()).isEqualTo(4); - // Assert initial creation of items - assertThat(itemChanges.get(0).get("current").get("id").asText()).isEqualTo(item1.getId()); - assertThat(itemChanges.get(0).get("current").get("prop").asText()).isEqualTo(originalLastName); - assertThat(itemChanges.get(0).get("metadata").get("operationType").asText()).isEqualTo("create"); - assertThat(itemChanges.get(1).get("current").get("id").asText()).isEqualTo(item2.getId()); - assertThat(itemChanges.get(1).get("current").get("prop").asText()).isEqualTo(item2.getProp()); - assertThat(itemChanges.get(1).get("metadata").get("operationType").asText()).isEqualTo("create"); - // Assert replace of item1 - assertThat(itemChanges.get(2).get("current").get("id").asText()).isEqualTo(item1.getId()); - assertThat(itemChanges.get(2).get("current").get("prop").asText()).isEqualTo(item1.getProp()); - assertThat(itemChanges.get(2).get("metadata").get("operationType").asText()).isEqualTo("replace"); - if (itemChanges.get(2).get("previous") != null) { - assertThat(itemChanges.get(2).get("previous")).isEqualTo(itemChanges.get(0).get("current")); + options = CosmosChangeFeedRequestOptions + .createForProcessingFromContinuation(continuationToken); + options.allVersionsAndDeletes(); + + TestItem item1 = new TestItem( + UUID.randomUUID().toString(), + "mypk", "Johnson"); + TestItem item2 = new TestItem( + UUID.randomUUID().toString(), + "mypk", "Smith"); + cosmosContainer.upsertItem(item1).block(); + cosmosContainer.upsertItem(item2).block(); + String originalLastName = item1.getProp(); + item1.setProp("Gates"); + cosmosContainer.upsertItem(item1).block(); + cosmosContainer.deleteItem(item1, new CosmosItemRequestOptions()).block(); + + results = cosmosContainer + .queryChangeFeed(options, JsonNode.class) + .byPage() + .toIterable() + .iterator(); + + if (results.hasNext()) { + FeedResponse response = results.next(); + List 
itemChanges = response.getResults(); + assertThat(itemChanges.size()).isEqualTo(4); + // Assert initial creation of items + assertThat(itemChanges.get(0).get("current").get("id").asText()).isEqualTo(item1.getId()); + assertThat(itemChanges.get(0).get("current").get("prop").asText()).isEqualTo(originalLastName); + assertThat(itemChanges.get(0).get("metadata").get("operationType").asText()).isEqualTo("create"); + assertThat(itemChanges.get(1).get("current").get("id").asText()).isEqualTo(item2.getId()); + assertThat(itemChanges.get(1).get("current").get("prop").asText()).isEqualTo(item2.getProp()); + assertThat(itemChanges.get(1).get("metadata").get("operationType").asText()).isEqualTo("create"); + // Assert replace of item1 + assertThat(itemChanges.get(2).get("current").get("id").asText()).isEqualTo(item1.getId()); + assertThat(itemChanges.get(2).get("current").get("prop").asText()).isEqualTo(item1.getProp()); + assertThat(itemChanges.get(2).get("metadata").get("operationType").asText()).isEqualTo("replace"); + if (itemChanges.get(2).get("previous") != null) { + assertThat(itemChanges.get(2).get("previous")).isEqualTo(itemChanges.get(0).get("current")); + } + // Assert delete of item1 + assertThat(itemChanges.get(3).get("previous").get("id").asText()).isEqualTo(item1.getId()); + assertThat(itemChanges.get(3).get("current")).isEmpty(); + assertThat(itemChanges.get(3).get("metadata").get("operationType").asText()).isEqualTo("delete"); + assertThat(itemChanges.get(3).get("metadata").get("previousImageLSN").asText() + ).isEqualTo(itemChanges.get(2).get("metadata").get("lsn").asText()); + } else { + fail("change feed missing results"); } - // Assert delete of item1 - assertThat(itemChanges.get(3).get("previous").get("id").asText()).isEqualTo(item1.getId()); - assertThat(itemChanges.get(3).get("current")).isEmpty(); - assertThat(itemChanges.get(3).get("metadata").get("operationType").asText()).isEqualTo("delete"); - assertThat(itemChanges.get(3).get("metadata").get("previousImageLSN").asText() - ).isEqualTo(itemChanges.get(2).get("metadata").get("lsn").asText()); - // Assert item2 deleted with TTL - // TODO: Missing TTL logic showing up - } else { - fail("change feed missing results"); + } finally { + safeDeleteCollection(cosmosContainer); } } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void fullFidelityChangeFeed_FromContinuationTokenOperationsOrder() throws Exception { - CosmosContainer cosmosContainer = initializeFFCFContainer(0); - CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions - .createForProcessingFromNow(FeedRange.forFullRange()); - options.fullFidelity(); - - Iterator> results = cosmosContainer - .queryChangeFeed(options, JsonNode.class) - .iterableByPage() - .iterator(); - - String continuationToken = null; - while (results.hasNext()) { - FeedResponse response = results.next(); - continuationToken = response.getContinuationToken(); - } + CosmosAsyncContainer cosmosContainer = initializeFFCFContainer(); + try { + CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions + .createForProcessingFromNow(FeedRange.forFullRange()); + options.allVersionsAndDeletes(); + + Iterator> results = cosmosContainer + .queryChangeFeed(options, JsonNode.class) + .byPage() + .toIterable() + .iterator(); + + String continuationToken = null; + while (results.hasNext()) { + FeedResponse response = results.next(); + continuationToken = response.getContinuationToken(); + } - options = 
CosmosChangeFeedRequestOptions.createForProcessingFromContinuation(continuationToken); - options.fullFidelity(); - options.setMaxItemCount(150); // get all results in one page - - // Create, replace, and delete 50 objects for 150 total operations - for (int i = 0; i < 50; i++) { - TestItem currentItem = new TestItem("item"+ i, "mypk", "Smith"); - cosmosContainer.upsertItem(currentItem); - currentItem.setProp("Jefferson"); - cosmosContainer.upsertItem(currentItem); - cosmosContainer.deleteItem(currentItem, new CosmosItemRequestOptions()); - } + options = CosmosChangeFeedRequestOptions.createForProcessingFromContinuation(continuationToken); + options.allVersionsAndDeletes(); + options.setMaxItemCount(150); // get all results in one page + + // Create, replace, and delete 50 objects for 150 total operations + for (int i = 0; i < 50; i++) { + TestItem currentItem = new TestItem("item"+ i, "mypk", "Smith"); + cosmosContainer.upsertItem(currentItem).block(); + currentItem.setProp("Jefferson"); + cosmosContainer.upsertItem(currentItem).block(); + cosmosContainer.deleteItem(currentItem, new CosmosItemRequestOptions()).block(); + } - results = cosmosContainer - .queryChangeFeed(options, JsonNode.class) - .iterableByPage() - .iterator(); - - if (results.hasNext()) { - FeedResponse response = results.next(); - List itemChanges = response.getResults(); - assertGatewayMode(response); - assertThat(itemChanges.size()).isEqualTo(150); - // Verify that operations order shows properly - for (int index = 0; index < 150; index+=3) { - assertThat(itemChanges.get(index).get("metadata").get("operationType").asText()).isEqualTo("create"); - assertThat(itemChanges.get(index+1).get("metadata").get("operationType").asText()).isEqualTo("replace"); - assertThat(itemChanges.get(index+2).get("metadata").get("operationType").asText()).isEqualTo("delete"); + results = cosmosContainer + .queryChangeFeed(options, JsonNode.class) + .byPage() + .toIterable() + .iterator(); + + if (results.hasNext()) { + FeedResponse response = results.next(); + List itemChanges = response.getResults(); + assertThat(itemChanges.size()).isEqualTo(150); + // Verify that operations order shows properly + for (int index = 0; index < 150; index+=3) { + assertThat(itemChanges.get(index).get("metadata").get("operationType").asText()).isEqualTo("create"); + assertThat(itemChanges.get(index+1).get("metadata").get("operationType").asText()).isEqualTo("replace"); + assertThat(itemChanges.get(index+2).get("metadata").get("operationType").asText()).isEqualTo("delete"); + } + } else { + fail("change feed missing results"); } - } else { - fail("change feed missing results"); + } finally { + safeDeleteCollection(cosmosContainer); } } + // TODO: re-enable this test once pipeline emulator has these changes - currently only in preview - @Test(groups = { "emulator" }, enabled = false, timeOut = TIMEOUT) + @Test(groups = { "emulator" }, timeOut = TIMEOUT, enabled = false) public void fullFidelityChangeFeed_VerifyPreviousPresentOnReplace() throws Exception { - CosmosContainer cosmosContainer = initializeFFCFContainer(2); - CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions - .createForProcessingFromNow(FeedRange.forFullRange()); - options.fullFidelity(); - - Iterator> results = cosmosContainer - .queryChangeFeed(options, JsonNode.class) - .iterableByPage() - .iterator(); - - String continuationToken = null; - while (results.hasNext()) { - FeedResponse response = results.next(); - continuationToken = response.getContinuationToken(); - } + 
CosmosAsyncContainer cosmosContainer = initializeFFCFContainer(); + try { + CosmosChangeFeedRequestOptions options = CosmosChangeFeedRequestOptions + .createForProcessingFromNow(FeedRange.forFullRange()); + options.allVersionsAndDeletes(); + + Iterator> results = cosmosContainer + .queryChangeFeed(options, JsonNode.class) + .byPage() + .toIterable() + .iterator(); + + String continuationToken = null; + while (results.hasNext()) { + FeedResponse response = results.next(); + continuationToken = response.getContinuationToken(); + } - options = CosmosChangeFeedRequestOptions - .createForProcessingFromContinuation(continuationToken); - options.fullFidelity(); - - TestItem item1 = new TestItem( - UUID.randomUUID().toString(), - "mypk", "Johnson"); - cosmosContainer.upsertItem(item1); - String originalLastName = item1.getProp(); - item1.setProp("Gates"); - cosmosContainer.upsertItem(item1); - String secondLastName = item1.getProp(); - item1.setProp("DiCaprio"); - String thirdLastName = item1.getProp(); - cosmosContainer.upsertItem(item1); - item1.setProp(originalLastName); - cosmosContainer.upsertItem(item1); - cosmosContainer.deleteItem(item1, new CosmosItemRequestOptions()); - - results = cosmosContainer - .queryChangeFeed(options, JsonNode.class) - .iterableByPage() - .iterator(); - - if (results.hasNext()) { - FeedResponse response = results.next(); - List itemChanges = response.getResults(); - assertGatewayMode(response); - assertThat(itemChanges.size()).isEqualTo(5); - // Assert initial creation of item1 - assertThat(itemChanges.get(0).get("current").get("id").asText()).isEqualTo(item1.getId()); - assertThat(itemChanges.get(0).get("current").get("prop").asText()).isEqualTo(originalLastName); - assertThat(itemChanges.get(0).get("metadata").get("operationType").asText()).isEqualTo("create"); - // Verify separate replace operations - assertThat(itemChanges.get(1).get("current").get("id").asText()).isEqualTo(item1.getId()); - assertThat(itemChanges.get(1).get("current").get("prop").asText()).isEqualTo(secondLastName); - assertThat(itemChanges.get(1).get("metadata").get("operationType").asText()).isEqualTo("replace"); - assertThat(itemChanges.get(1).get("metadata").get("previousImageLSN").asText() - ).isEqualTo(itemChanges.get(0).get("metadata").get("lsn").asText()); - assertThat(itemChanges.get(1).get("previous")).isEqualTo(itemChanges.get(0).get("current")); - - assertThat(itemChanges.get(2).get("current").get("id").asText()).isEqualTo(item1.getId()); - assertThat(itemChanges.get(2).get("current").get("prop").asText()).isEqualTo(thirdLastName); - assertThat(itemChanges.get(2).get("metadata").get("operationType").asText()).isEqualTo("replace"); - assertThat(itemChanges.get(2).get("metadata").get("previousImageLSN").asText() - ).isEqualTo(itemChanges.get(1).get("metadata").get("lsn").asText()); - assertThat(itemChanges.get(2).get("previous")).isEqualTo(itemChanges.get(1).get("current")); - - assertThat(itemChanges.get(3).get("previous").get("id").asText()).isEqualTo(item1.getId()); - assertThat(itemChanges.get(3).get("current").get("prop").asText()).isEqualTo(item1.getProp()); - assertThat(itemChanges.get(3).get("metadata").get("operationType").asText()).isEqualTo("replace"); - assertThat(itemChanges.get(3).get("metadata").get("previousImageLSN").asText() - ).isEqualTo(itemChanges.get(2).get("metadata").get("lsn").asText()); - assertThat(itemChanges.get(3).get("previous")).isEqualTo(itemChanges.get(2).get("current")); - } else { - fail("change feed missing results"); - } - } + options = 
CosmosChangeFeedRequestOptions + .createForProcessingFromContinuation(continuationToken); + options.allVersionsAndDeletes(); + + TestItem item1 = new TestItem( + UUID.randomUUID().toString(), + "mypk", "Johnson"); + cosmosContainer.upsertItem(item1).block(); + String originalLastName = item1.getProp(); + item1.setProp("Gates"); + cosmosContainer.upsertItem(item1).block(); + String secondLastName = item1.getProp(); + item1.setProp("DiCaprio"); + String thirdLastName = item1.getProp(); + cosmosContainer.upsertItem(item1).block(); + item1.setProp(originalLastName); + cosmosContainer.upsertItem(item1).block(); + cosmosContainer.deleteItem(item1, new CosmosItemRequestOptions()).block(); + + results = cosmosContainer + .queryChangeFeed(options, JsonNode.class) + .byPage() + .toIterable() + .iterator(); + + if (results.hasNext()) { + FeedResponse response = results.next(); + List itemChanges = response.getResults(); + assertThat(itemChanges.size()).isEqualTo(5); + // Assert initial creation of item1 + assertThat(itemChanges.get(0).get("current").get("id").asText()).isEqualTo(item1.getId()); + assertThat(itemChanges.get(0).get("current").get("prop").asText()).isEqualTo(originalLastName); + assertThat(itemChanges.get(0).get("metadata").get("operationType").asText()).isEqualTo("create"); + // Verify separate replace operations + assertThat(itemChanges.get(1).get("current").get("id").asText()).isEqualTo(item1.getId()); + assertThat(itemChanges.get(1).get("current").get("prop").asText()).isEqualTo(secondLastName); + assertThat(itemChanges.get(1).get("metadata").get("operationType").asText()).isEqualTo("replace"); + assertThat(itemChanges.get(1).get("metadata").get("previousImageLSN").asText() + ).isEqualTo(itemChanges.get(0).get("metadata").get("lsn").asText()); + assertThat(itemChanges.get(1).get("previous")).isEqualTo(itemChanges.get(0).get("current")); - public CosmosContainer initializeFFCFContainer(int ttl) { - CosmosClient ffcfCosmosClient = new CosmosClientBuilder() - .endpoint(TestConfigurations.HOST) - .key(TestConfigurations.MASTER_KEY) - .consistencyLevel(ConsistencyLevel.SESSION) - .contentResponseOnWriteEnabled(true) - .directMode() - .buildClient(); - PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition(); - ArrayList paths = new ArrayList<>(); - paths.add("/mypk"); - partitionKeyDef.setPaths(paths); - - String containerId = "FFCF_container" + UUID.randomUUID().toString(); - CosmosContainerProperties containerProperties = - new CosmosContainerProperties(containerId, partitionKeyDef); - if (ttl != 0) { - containerProperties.setDefaultTimeToLiveInSeconds(ttl); + assertThat(itemChanges.get(2).get("current").get("id").asText()).isEqualTo(item1.getId()); + assertThat(itemChanges.get(2).get("current").get("prop").asText()).isEqualTo(thirdLastName); + assertThat(itemChanges.get(2).get("metadata").get("operationType").asText()).isEqualTo("replace"); + assertThat(itemChanges.get(2).get("metadata").get("previousImageLSN").asText() + ).isEqualTo(itemChanges.get(1).get("metadata").get("lsn").asText()); + assertThat(itemChanges.get(2).get("previous")).isEqualTo(itemChanges.get(1).get("current")); + + assertThat(itemChanges.get(3).get("previous").get("id").asText()).isEqualTo(item1.getId()); + assertThat(itemChanges.get(3).get("current").get("prop").asText()).isEqualTo(item1.getProp()); + assertThat(itemChanges.get(3).get("metadata").get("operationType").asText()).isEqualTo("replace"); + assertThat(itemChanges.get(3).get("metadata").get("previousImageLSN").asText() + 
).isEqualTo(itemChanges.get(2).get("metadata").get("lsn").asText()); + assertThat(itemChanges.get(3).get("previous")).isEqualTo(itemChanges.get(2).get("current")); + } else { + fail("change feed missing results"); + } + } finally { + safeDeleteCollection(cosmosContainer); } - containerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(5))); - - CosmosDatabaseResponse databaseResponse = ffcfCosmosClient.createDatabaseIfNotExists(createdDatabase.getId()); - CosmosDatabase database = ffcfCosmosClient.getDatabase(databaseResponse.getProperties().getId()); - CosmosContainerResponse containerResponse = database.createContainerIfNotExists(containerProperties); - - return database.getContainer(containerResponse.getProperties().getId()); } - // TODO: check why diagnostics are not showing this for change feed - void assertGatewayMode(FeedResponse response) { - String diagnostics = response.getCosmosDiagnostics().toString(); - logger.info("Full Fidelity Diagnostics are : {}", diagnostics); - assertThat(diagnostics).contains(""); + public CosmosAsyncContainer initializeFFCFContainer() { + CosmosContainerProperties cosmosContainerProperties = getCollectionDefinition(); + cosmosContainerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(5))); + return createCollection(client, createdDatabase.getId(), cosmosContainerProperties); } - @BeforeClass(groups = { "simple", "emulator" }, timeOut = SETUP_TIMEOUT) + @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT) public void before_ChangeFeedTest() throws Exception { // set up the client - client = clientBuilder().build(); - createdDatabase = SHARED_DATABASE; + client = this.getClientBuilder().buildAsyncClient(); + createdDatabase = getSharedCosmosDatabase(this.client); } - @AfterClass(groups = { "simple", "emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + @AfterClass(groups = { "emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } - - @Retention(java.lang.annotation.RetentionPolicy.RUNTIME) - @Target({ METHOD }) - @interface Tag { - String name(); - } } diff --git a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/TestSuiteBase.java b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/TestSuiteBase.java index af6db656230f6..6d0cf822c59c3 100644 --- a/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/TestSuiteBase.java +++ b/sdk/cosmos/azure-cosmos/src/test/java/com/azure/cosmos/rx/TestSuiteBase.java @@ -521,7 +521,7 @@ private static CosmosAsyncContainer safeCreateCollection(CosmosAsyncClient clien static protected CosmosContainerProperties getCollectionDefinitionWithFullFidelity() { CosmosContainerProperties cosmosContainerProperties = getCollectionDefinition(UUID.randomUUID().toString()); - cosmosContainerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(5))); + cosmosContainerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(5))); return cosmosContainerProperties; }