From ba4feb33e7cc4e4881667427d6f9bfce011511c0 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Wed, 4 Mar 2020 16:16:40 -0800 Subject: [PATCH 001/110] Add example source --- .../cosmos/examples/AccountSettings.java | 38 + .../com/azure/cosmos/examples/BasicDemo.java | 204 +++++ .../azure/cosmos/examples/HelloWorldDemo.java | 105 +++ .../examples/changefeed/CustomPOJO.java | 20 + .../changefeed/SampleChangeFeedProcessor.java | 349 +++++++ .../changefeed/SampleConfigurations.java | 36 + .../examples/common/AccountSettings.java | 40 + .../azure/cosmos/examples/common/Address.java | 35 + .../azure/cosmos/examples/common/Child.java | 53 ++ .../cosmos/examples/common/Families.java | 117 +++ .../azure/cosmos/examples/common/Family.java | 74 ++ .../azure/cosmos/examples/common/Parent.java | 33 + .../com/azure/cosmos/examples/common/Pet.java | 16 + .../async/SampleCRUDQuickstartAsync.java | 287 ++++++ .../sync/SampleCRUDQuickstart.java | 206 +++++ .../async/SampleIndexManagementAsync.java | 343 +++++++ .../sync/SampleIndexManagement.java | 249 +++++ .../async/SampleStoredProcedureAsync.java | 181 ++++ .../sync/SampleStoredProcedure.java | 149 +++ .../multimaster/ConfigurationManager.java | 12 + .../rx/examples/multimaster/Helpers.java | 84 ++ .../multimaster/samples/ConflictWorker.java | 858 ++++++++++++++++++ .../rx/examples/multimaster/samples/Main.java | 59 ++ .../samples/MultiMasterScenario.java | 146 +++ .../examples/multimaster/samples/Worker.java | 166 ++++ src/main/resources/log4j2.properties | 21 + .../multi-master-sample-config.properties | 8 + src/main/resources/resolver-storedproc.txt | 45 + 28 files changed, 3934 insertions(+) create mode 100644 src/main/java/com/azure/cosmos/examples/AccountSettings.java create mode 100644 src/main/java/com/azure/cosmos/examples/BasicDemo.java create mode 100644 src/main/java/com/azure/cosmos/examples/HelloWorldDemo.java create mode 100644 src/main/java/com/azure/cosmos/examples/changefeed/CustomPOJO.java create mode 100644 
src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java create mode 100644 src/main/java/com/azure/cosmos/examples/changefeed/SampleConfigurations.java create mode 100644 src/main/java/com/azure/cosmos/examples/common/AccountSettings.java create mode 100644 src/main/java/com/azure/cosmos/examples/common/Address.java create mode 100644 src/main/java/com/azure/cosmos/examples/common/Child.java create mode 100644 src/main/java/com/azure/cosmos/examples/common/Families.java create mode 100644 src/main/java/com/azure/cosmos/examples/common/Family.java create mode 100644 src/main/java/com/azure/cosmos/examples/common/Parent.java create mode 100644 src/main/java/com/azure/cosmos/examples/common/Pet.java create mode 100644 src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java create mode 100644 src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java create mode 100644 src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java create mode 100644 src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java create mode 100644 src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java create mode 100644 src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java create mode 100644 src/main/java/com/azure/cosmos/rx/examples/multimaster/ConfigurationManager.java create mode 100644 src/main/java/com/azure/cosmos/rx/examples/multimaster/Helpers.java create mode 100644 src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/ConflictWorker.java create mode 100644 src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Main.java create mode 100644 src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/MultiMasterScenario.java create mode 100644 src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Worker.java create mode 100644 
src/main/resources/log4j2.properties create mode 100644 src/main/resources/multi-master-sample-config.properties create mode 100644 src/main/resources/resolver-storedproc.txt diff --git a/src/main/java/com/azure/cosmos/examples/AccountSettings.java b/src/main/java/com/azure/cosmos/examples/AccountSettings.java new file mode 100644 index 0000000..e413f4e --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/AccountSettings.java @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples; + +import org.apache.commons.lang3.StringUtils; + +/** + * Contains the account configurations for Sample. + * + * For running tests, you can pass a customized endpoint configuration in one of the following + * ways: + * + * + * If none of the above is set, emulator endpoint will be used. + * Emulator http cert is self signed. If you are using emulator, + * make sure emulator https certificate is imported + * to java trusted cert store: + * https://docs.microsoft.com/en-us/azure/cosmos-db/local-emulator-export-ssl-certificates + */ +public class AccountSettings { + // REPLACE MASTER_KEY and HOST with values from your Azure Cosmos DB account. + // The default values are credentials of the local emulator, which are not used in any production environment. 
+ public static final String HOST = + System.getProperty("ACCOUNT_HOST", + StringUtils.defaultString(StringUtils.trimToNull( + System.getenv().get("COSMOS_ACCOUNT_HOST")), + "https://localhost:8081/")); + public static final String MASTER_KEY = + System.getProperty("ACCOUNT_KEY", + StringUtils.defaultString(StringUtils.trimToNull( + System.getenv().get("COSMOS_ACCOUNT_KEY")), + "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==")); +} diff --git a/src/main/java/com/azure/cosmos/examples/BasicDemo.java b/src/main/java/com/azure/cosmos/examples/BasicDemo.java new file mode 100644 index 0000000..4db349e --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/BasicDemo.java @@ -0,0 +1,204 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.cosmos.examples; + +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosAsyncItemResponse; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosContainerProperties; +import com.azure.cosmos.CosmosContinuablePagedFlux; +import com.azure.cosmos.implementation.CosmosItemProperties; +import com.azure.cosmos.FeedOptions; +import com.azure.cosmos.FeedResponse; +import com.azure.cosmos.PartitionKey; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Schedulers; + +public class BasicDemo { + + private static final String DATABASE_NAME = "test_db"; + private static final String CONTAINER_NAME = "test_container"; + private CosmosAsyncClient client; + private CosmosAsyncDatabase database; + private CosmosAsyncContainer container; + + public static void main(String[] args) { + BasicDemo demo = new BasicDemo(); + demo.start(); + } + + private void start() { + // Get client + client = CosmosAsyncClient.cosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + 
.setKey(AccountSettings.MASTER_KEY) + .buildAsyncClient(); + + //CREATE a database and a container + createDbAndContainerBlocking(); + + //Get a proxy reference to container + container = client.getDatabase(DATABASE_NAME).getContainer(CONTAINER_NAME); + + CosmosAsyncContainer container = client.getDatabase(DATABASE_NAME).getContainer(CONTAINER_NAME); + TestObject testObject = new TestObject("item_new_id_1", "test", "test description", "US"); + TestObject testObject2 = new TestObject("item_new_id_2", "test2", "test description2", "CA"); + + //CREATE an Item async + + Mono> itemResponseMono = container.createItem(testObject); + //CREATE another Item async + Mono> itemResponseMono1 = container.createItem(testObject2); + + //Wait for completion + try { + itemResponseMono.doOnError(throwable -> log("CREATE item 1", throwable)) + .mergeWith(itemResponseMono1) + .doOnError(throwable -> log("CREATE item 2 ", throwable)) + .doOnComplete(() -> log("Items created")) + .publishOn(Schedulers.elastic()) + .blockLast(); + } catch (RuntimeException e) { + log("Couldn't create items due to above exceptions"); + } + + createAndReplaceItem(); + queryItems(); + queryWithContinuationToken(); + + //Close client + client.close(); + log("Completed"); + } + + private void createAndReplaceItem() { + TestObject replaceObject = new TestObject("item_new_id_3", "test3", "test description3", "JP"); + TestObject properties = null; + //CREATE item sync + try { + properties = container.createItem(replaceObject) + .doOnError(throwable -> log("CREATE 3", throwable)) + .publishOn(Schedulers.elastic()) + .block() + .getResource(); + } catch (RuntimeException e) { + log("Couldn't create items due to above exceptions"); + } + if (properties != null) { + replaceObject.setName("new name test3"); + + //REPLACE the item and wait for completion + container.replaceItem(replaceObject, + properties.getId(), + new PartitionKey(replaceObject.getCountry())) + .block(); + } + } + + private void 
createDbAndContainerBlocking() { + client.createDatabaseIfNotExists(DATABASE_NAME) + .doOnSuccess(cosmosDatabaseResponse -> log("Database: " + cosmosDatabaseResponse.getDatabase().getId())) + .flatMap(dbResponse -> dbResponse.getDatabase() + .createContainerIfNotExists(new CosmosContainerProperties(CONTAINER_NAME, + "/country"))) + .doOnSuccess(cosmosContainerResponse -> log("Container: " + cosmosContainerResponse.getContainer().getId())) + .doOnError(throwable -> log(throwable.getMessage())) + .publishOn(Schedulers.elastic()) + .block(); + } + + private void queryItems() { + log("+ Querying the collection "); + String query = "SELECT * from root"; + FeedOptions options = new FeedOptions(); + options.setMaxDegreeOfParallelism(2); + CosmosContinuablePagedFlux queryFlux = container.queryItems(query, options, TestObject.class); + + queryFlux.byPage() + .publishOn(Schedulers.elastic()) + .toIterable() + .forEach(cosmosItemFeedResponse -> { + log(cosmosItemFeedResponse.getResults()); + }); + + } + + private void queryWithContinuationToken() { + log("+ Query with paging using continuation token"); + String query = "SELECT * from root r "; + FeedOptions options = new FeedOptions(); + options.populateQueryMetrics(true); + options.maxItemCount(1); + String continuation = null; + do { + options.requestContinuation(continuation); + CosmosContinuablePagedFlux queryFlux = container.queryItems(query, options, TestObject.class); + FeedResponse page = queryFlux.byPage().blockFirst(); + assert page != null; + log(page.getResults()); + continuation = page.getContinuationToken(); + } while (continuation != null); + + } + + private void log(Object object) { + System.out.println(object); + } + + private void log(String msg, Throwable throwable) { + if (throwable instanceof CosmosClientException) { + log(msg + ": " + ((CosmosClientException) throwable).getStatusCode()); + } + } + + static class TestObject { + String id; + String name; + String description; + String country; + + public 
TestObject() { + } + + public TestObject(String id, String name, String description, String country) { + this.id = id; + this.name = name; + this.description = description; + this.country = country; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getCountry() { + return country; + } + + public void setCountry(String country) { + this.country = country; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + } +} diff --git a/src/main/java/com/azure/cosmos/examples/HelloWorldDemo.java b/src/main/java/com/azure/cosmos/examples/HelloWorldDemo.java new file mode 100644 index 0000000..f571a11 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/HelloWorldDemo.java @@ -0,0 +1,105 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.cosmos.examples; + +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.implementation.CosmosItemProperties; +import com.azure.cosmos.PartitionKey; +import reactor.core.publisher.Mono; + +import java.io.IOException; + +public class HelloWorldDemo { + public static void main(String[] args) { + new HelloWorldDemo().runDemo(); + } + + void runDemo() { + // Create a new CosmosAsyncClient via the CosmosClientBuilder + // It only requires endpoint and key, but other useful settings are available + CosmosAsyncClient client = new CosmosClientBuilder() + .setEndpoint("") + .setKey("") + .buildAsyncClient(); + + // Get a reference to the container + // This will create (or read) a database and its container. 
+ CosmosAsyncContainer container = client.createDatabaseIfNotExists("contoso-travel") + // TIP: Our APIs are Reactor Core based, so try to chain your calls + .flatMap(response -> response.getDatabase() + .createContainerIfNotExists("passengers", "/id")) + .flatMap(response -> Mono.just(response.getContainer())) + .block(); // Blocking for demo purposes (avoid doing this in production unless you must) + + // Create an item + container.createItem(new Passenger("carla.davis@outlook.com", "Carla Davis", "SEA", "IND")) + .flatMap(response -> { + System.out.println("Created item: " + response.getResource()); + // Read that item 👓 + return container.readItem(response.getResource().getId(), + new PartitionKey(response.getResource().getId()), + Passenger.class); + }) + .flatMap(response -> { + System.out.println("Read item: " + response.getResource()); + // Replace that item 🔁 + Passenger p = response.getResource(); + p.setDestination("SFO"); + return container.replaceItem(p, + response.getResource().getId(), + new PartitionKey(response.getResource().getId())); + }) + // delete that item 💣 + .flatMap(response -> container.deleteItem(response.getResource().getId(), + new PartitionKey(response.getResource().getId()))) + .block(); // Blocking for demo purposes (avoid doing this in production unless you must) + } + + // Just a random object for demo's sake + public class Passenger { + String id; + String name; + String destination; + String source; + + public Passenger(String id, String name, String destination, String source) { + this.id = id; + this.name = name; + this.destination = destination; + this.source = source; + } + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDestination() { + return destination; + } + + public void setDestination(String destination) { + this.destination = destination; 
+ } + + public String getSource() { + return source; + } + + public void setSource(String source) { + this.source = source; + } + } +} diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/CustomPOJO.java b/src/main/java/com/azure/cosmos/examples/changefeed/CustomPOJO.java new file mode 100644 index 0000000..9562196 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/changefeed/CustomPOJO.java @@ -0,0 +1,20 @@ +package com.azure.cosmos.examples.changefeed; + +import com.azure.cosmos.JsonSerializable; + +public class CustomPOJO { + private String id; + + public CustomPOJO() { + + } + + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } +} diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java new file mode 100644 index 0000000..05e6934 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java @@ -0,0 +1,349 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+package com.azure.cosmos.examples.changefeed; + +import com.azure.cosmos.ChangeFeedProcessor; +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncContainerResponse; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosContainerProperties; +import com.azure.cosmos.CosmosContainerRequestOptions; +import com.azure.cosmos.implementation.CosmosItemProperties; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.implementation.Utils; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.commons.lang3.RandomStringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.scheduler.Schedulers; + +import java.time.Duration; +import java.util.List; + +/** + * Sample for Change Feed Processor. 
+ * + */ +public class SampleChangeFeedProcessor { + + public static int WAIT_FOR_WORK = 60000; + public static final String DATABASE_NAME = "db_" + RandomStringUtils.randomAlphabetic(7); + public static final String COLLECTION_NAME = "coll_" + RandomStringUtils.randomAlphabetic(7); + private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper(); + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + + private static ChangeFeedProcessor changeFeedProcessorInstance; + private static boolean isWorkCompleted = false; + + public static void main (String[]args) { + logger.info("BEGIN Sample"); + + try { + + System.out.println("-->CREATE DocumentClient"); + CosmosAsyncClient client = getCosmosClient(); + + System.out.println("-->CREATE sample's database: " + DATABASE_NAME); + CosmosAsyncDatabase cosmosDatabase = createNewDatabase(client, DATABASE_NAME); + + System.out.println("-->CREATE container for documents: " + COLLECTION_NAME); + CosmosAsyncContainer feedContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME); + + System.out.println("-->CREATE container for lease: " + COLLECTION_NAME + "-leases"); + CosmosAsyncContainer leaseContainer = createNewLeaseCollection(client, DATABASE_NAME, COLLECTION_NAME + "-leases"); + + changeFeedProcessorInstance = getChangeFeedProcessor("SampleHost_1", feedContainer, leaseContainer); + System.out.println("Got here\n"); + changeFeedProcessorInstance.start() + .subscribeOn(Schedulers.elastic()) + .doOnSuccess(aVoid -> { + System.out.println("!doOnSuccess!\n"); + createNewDocumentsJSON(feedContainer, 10, Duration.ofSeconds(3)); + isWorkCompleted = true; + }) + .subscribe(); + System.out.println("and here\n"); + + long remainingWork = WAIT_FOR_WORK; + while (!isWorkCompleted && remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + + if (isWorkCompleted) { + if (changeFeedProcessorInstance != null) { + 
changeFeedProcessorInstance.stop().subscribe(); + } + } else { + throw new RuntimeException("The change feed processor initialization and automatic create document feeding process did not complete in the expected time"); + } + + System.out.println("-->DELETE sample's database: " + DATABASE_NAME); + deleteDatabase(cosmosDatabase); + + Thread.sleep(500); + + } catch (Exception e) { + e.printStackTrace(); + } + + System.out.println("END Sample"); + } + + public static ChangeFeedProcessor getChangeFeedProcessor(String hostName, CosmosAsyncContainer feedContainer, CosmosAsyncContainer leaseContainer) { + return ChangeFeedProcessor.changeFeedProcessorBuilder() + .setHostName(hostName) + .setFeedContainer(feedContainer) + .setLeaseContainer(leaseContainer) + .setHandleChanges((List docs) -> { + System.out.println("--->setHandleChanges() START"); + + for (JsonNode document : docs) { + try { + System.out.println("---->DOCUMENT RECEIVED: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() + .writeValueAsString(document)); + } catch (JsonProcessingException e) { + e.printStackTrace(); + } + } + System.out.println("--->handleChanges() END"); + + }) + .build(); + } + + /* + public static ChangeFeedProcessor getChangeFeedProcessorCustomPOJO(String hostName, CosmosAsyncContainer feedContainer, CosmosAsyncContainer leaseContainer) { + return ChangeFeedProcessor.changeFeedProcessorBuilder() + .setHostName(hostName) + .setFeedContainer(feedContainer) + .setLeaseContainer(leaseContainer) + .setHandleChanges((List docs) -> { + System.out.println("--->setHandleChanges() START"); + + for (CustomPOJO document : docs) { + try { + System.out.println("---->DOCUMENT RECEIVED: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() + .writeValueAsString(document)); + } catch (JsonProcessingException e) { + e.printStackTrace(); + } + } + System.out.println("--->handleChanges() END"); + + }) + .build(); + } + + + */ + public static CosmosAsyncClient getCosmosClient() { + + return new 
CosmosClientBuilder() + .setEndpoint(SampleConfigurations.HOST) + .setKey(SampleConfigurations.MASTER_KEY) + .setConnectionPolicy(ConnectionPolicy.getDefaultPolicy()) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildAsyncClient(); + } + + public static CosmosAsyncDatabase createNewDatabase(CosmosAsyncClient client, String databaseName) { + return client.createDatabaseIfNotExists(databaseName).block().getDatabase(); + } + + public static void deleteDatabase(CosmosAsyncDatabase cosmosDatabase) { + cosmosDatabase.delete().block(); + } + + public static CosmosAsyncContainer createNewCollection(CosmosAsyncClient client, String databaseName, String collectionName) { + CosmosAsyncDatabase databaseLink = client.getDatabase(databaseName); + CosmosAsyncContainer collectionLink = databaseLink.getContainer(collectionName); + CosmosAsyncContainerResponse containerResponse = null; + + try { + containerResponse = collectionLink.read().block(); + + if (containerResponse != null) { + throw new IllegalArgumentException(String.format("Collection %s already exists in database %s.", collectionName, databaseName)); + } + } catch (RuntimeException ex) { + if (ex instanceof CosmosClientException) { + CosmosClientException cosmosClientException = (CosmosClientException) ex; + + if (cosmosClientException.getStatusCode() != 404) { + throw ex; + } + } else { + throw ex; + } + } + + CosmosContainerProperties containerSettings = new CosmosContainerProperties(collectionName, "/id"); + + CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); + + containerResponse = databaseLink.createContainer(containerSettings, 10000, requestOptions).block(); + + if (containerResponse == null) { + throw new RuntimeException(String.format("Failed to create collection %s in database %s.", collectionName, databaseName)); + } + + return containerResponse.getContainer(); + } + + public static CosmosAsyncContainer createNewLeaseCollection(CosmosAsyncClient client, String 
databaseName, String leaseCollectionName) { + CosmosAsyncDatabase databaseLink = client.getDatabase(databaseName); + CosmosAsyncContainer leaseCollectionLink = databaseLink.getContainer(leaseCollectionName); + CosmosAsyncContainerResponse leaseContainerResponse = null; + + try { + leaseContainerResponse = leaseCollectionLink.read().block(); + + if (leaseContainerResponse != null) { + leaseCollectionLink.delete().block(); + + try { + Thread.sleep(1000); + } catch (InterruptedException ex) { + ex.printStackTrace(); + } + } + } catch (RuntimeException ex) { + if (ex instanceof CosmosClientException) { + CosmosClientException cosmosClientException = (CosmosClientException) ex; + + if (cosmosClientException.getStatusCode() != 404) { + throw ex; + } + } else { + throw ex; + } + } + + CosmosContainerProperties containerSettings = new CosmosContainerProperties(leaseCollectionName, "/id"); + CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); + + leaseContainerResponse = databaseLink.createContainer(containerSettings, 400,requestOptions).block(); + + if (leaseContainerResponse == null) { + throw new RuntimeException(String.format("Failed to create collection %s in database %s.", leaseCollectionName, databaseName)); + } + + return leaseContainerResponse.getContainer(); + } + + public static void createNewDocuments(CosmosAsyncContainer containerClient, int count, Duration delay) { + String suffix = RandomStringUtils.randomAlphabetic(10); + for (int i = 0; i <= count; i++) { + CustomPOJO document = new CustomPOJO(); + document.setId(String.format("0%d-%s", i, suffix)); + + containerClient.createItem(document).subscribe(doc -> { + try { + System.out.println("---->DOCUMENT WRITE: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() + .writeValueAsString(doc)); + } catch (JsonProcessingException e) { + System.err.println(String.format("Failure in processing json %s", e.getMessage())); + } + }); + + long remainingWork = delay.toMillis(); + try { + 
while (remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException iex) { + // exception caught + break; + } + } + } + + public static void createNewDocumentsCustomPOJO(CosmosAsyncContainer containerClient, int count, Duration delay) { + String suffix = RandomStringUtils.randomAlphabetic(10); + for (int i = 0; i <= count; i++) { + CustomPOJO document = new CustomPOJO(); + document.setId(String.format("0%d-%s", i, suffix)); + + containerClient.createItem(document).subscribe(doc -> { + System.out.println("---->DOCUMENT WRITE: " + doc); + }); + + long remainingWork = delay.toMillis(); + try { + while (remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException iex) { + // exception caught + break; + } + } + } + + public static void createNewDocumentsJSON(CosmosAsyncContainer containerClient, int count, Duration delay) { + String suffix = RandomStringUtils.randomAlphabetic(10); + for (int i = 0; i <= count; i++) { +// CosmosItemProperties document = new CosmosItemProperties(); +// document.setId(String.format("0%d-%s", i, suffix)); + + String jsonString = + "{\n" + + "\"id\" : \"" + String.format("0%d-%s", i, suffix) + "\"\n" + + "}"; + + ObjectMapper mapper = new ObjectMapper(); + JsonNode document = null; + + try { + document = mapper.readTree(jsonString); + } catch (Exception e) { + e.printStackTrace(); + } + + containerClient.createItem(document).subscribe(doc -> { + try { + System.out.println("---->DOCUMENT WRITE: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() + .writeValueAsString(doc)); + } catch (JsonProcessingException e) { + System.err.println(String.format("Failure in processing json %s", e.getMessage())); + } + }); + + long remainingWork = delay.toMillis(); + try { + while (remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException iex) { + // exception caught + break; + } + } + } + + public static boolean 
ensureWorkIsDone(Duration delay) { + long remainingWork = delay.toMillis(); + try { + while (!isWorkCompleted && remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException iex) { + return false; + } + + return remainingWork > 0; + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleConfigurations.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleConfigurations.java new file mode 100644 index 0000000..86e75c7 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleConfigurations.java @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.cosmos.examples.changefeed; + +import com.google.common.base.Strings; +import org.apache.commons.lang3.StringUtils; + +/** + * Contains the configurations for tests. + *

+ * For running tests, you can pass a customized endpoint configuration in one of the following + * ways: + *

    + *
  • -DACCOUNT_KEY="[your-key]" -ACCOUNT_HOST="[your-endpoint]" as JVM + * command-line option.
  • + *
  • You can set ACCOUNT_KEY and ACCOUNT_HOST as environment variables.
  • + *
+ *

+ * If none of the above is set, emulator endpoint will be used. + */ +public final class SampleConfigurations { + // REPLACE MASTER_KEY and HOST with values from your Azure Cosmos DB account. + // The default values are credentials of the local emulator, which are not used in any production environment. + // + public static String MASTER_KEY = + System.getProperty("ACCOUNT_KEY", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("ACCOUNT_KEY")), + "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==")); + + public static String HOST = + System.getProperty("ACCOUNT_HOST", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("ACCOUNT_HOST")), + "https://localhost:8081/")); +} diff --git a/src/main/java/com/azure/cosmos/examples/common/AccountSettings.java b/src/main/java/com/azure/cosmos/examples/common/AccountSettings.java new file mode 100644 index 0000000..12bb3ba --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/AccountSettings.java @@ -0,0 +1,40 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.common; + +import org.apache.commons.lang3.StringUtils; + +/** + * Contains the account configurations for Sample. + * + * For running tests, you can pass a customized endpoint configuration in one of the following + * ways: + *

    + *
  • -DACCOUNT_KEY="[your-key]" -ACCOUNT_HOST="[your-endpoint]" as JVM + * command-line option.
  • + *
  • You can set ACCOUNT_KEY and ACCOUNT_HOST as environment variables.
  • + *
+ * + * If none of the above is set, emulator endpoint will be used. + * Emulator http cert is self signed. If you are using emulator, + * make sure emulator https certificate is imported + * to java trusted cert store: + * https://docs.microsoft.com/en-us/azure/cosmos-db/local-emulator-export-ssl-certificates + */ +public class AccountSettings { + // Replace MASTER_KEY and HOST with values from your Azure Cosmos DB account. + // The default values are credentials of the local emulator, which are not used in any production environment. + // + public static String MASTER_KEY = + System.getProperty("ACCOUNT_KEY", + StringUtils.defaultString(StringUtils.trimToNull( + System.getenv().get("ACCOUNT_KEY")), + "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==")); + + public static String HOST = + System.getProperty("ACCOUNT_HOST", + StringUtils.defaultString(StringUtils.trimToNull( + System.getenv().get("ACCOUNT_HOST")), + "https://localhost:443/")); +} diff --git a/src/main/java/com/azure/cosmos/examples/common/Address.java b/src/main/java/com/azure/cosmos/examples/common/Address.java new file mode 100644 index 0000000..ec7d5b3 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Address.java @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.examples.common; + +public class Address { + public String getState() { + return state; + } + + public void setState(String state) { + this.state = state; + } + + public String getCounty() { + return county; + } + + public void setCounty(String county) { + this.county = county; + } + + public String getCity() { + return city; + } + + public void setCity(String city) { + this.city = city; + } + + private String state; + private String county; + private String city; +} + diff --git a/src/main/java/com/azure/cosmos/examples/common/Child.java b/src/main/java/com/azure/cosmos/examples/common/Child.java new file mode 100644 index 0000000..98cdd5c --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Child.java @@ -0,0 +1,53 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.common; + +public class Child { + public String getFamilyName() { + return familyName; + } + + public void setFamilyName(String familyName) { + this.familyName = familyName; + } + + public String getFirstName() { + return firstName; + } + + public void setFirstName(String firstName) { + this.firstName = firstName; + } + + public String getGender() { + return gender; + } + + public void setGender(String gender) { + this.gender = gender; + } + + public int getGrade() { + return grade; + } + + public void setGrade(int grade) { + this.grade = grade; + } + + public Pet[] getPets() { + return pets; + } + + public void setPets(Pet[] pets) { + this.pets = pets; + } + + private String familyName; + private String firstName; + private String gender; + private int grade; + private Pet[] pets; +} + diff --git a/src/main/java/com/azure/cosmos/examples/common/Families.java b/src/main/java/com/azure/cosmos/examples/common/Families.java new file mode 100644 index 0000000..fd549ee --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Families.java @@ -0,0 +1,117 @@ +// Copyright 
(c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.common; + +public class Families { + + public static Family getAndersenFamilyItem() { + Family andersenFamily = new Family(); + andersenFamily.setId("Andersen-" + System.currentTimeMillis()); + andersenFamily.setLastName("Andersen"); + + Parent parent1 = new Parent(); + parent1.setFirstName("Thomas"); + + Parent parent2 = new Parent(); + parent2.setFirstName("Mary Kay"); + + andersenFamily.setParents(new Parent[] { parent1, parent2 }); + + Child child1 = new Child(); + child1.setFirstName("Henriette Thaulow"); + child1.setGender("female"); + child1.setGrade(5); + + Pet pet1 = new Pet(); + pet1.setGivenName("Fluffy"); + + child1.setPets(new Pet[] { pet1 }); + + andersenFamily.setDistrict("WA5"); + Address address = new Address(); + address.setCity("Seattle"); + address.setCounty("King"); + address.setState("WA"); + + andersenFamily.setAddress(address); + andersenFamily.setRegistered(true); + + return andersenFamily; + } + + public static Family getWakefieldFamilyItem() { + Family wakefieldFamily = new Family(); + wakefieldFamily.setId("Wakefield-" + System.currentTimeMillis()); + wakefieldFamily.setLastName("Wakefield"); + + Parent parent1 = new Parent(); + parent1.setFamilyName("Wakefield"); + parent1.setFirstName("Robin"); + + Parent parent2 = new Parent(); + parent2.setFamilyName("Miller"); + parent2.setFirstName("Ben"); + + wakefieldFamily.setParents(new Parent[] { parent1, parent2 }); + + Child child1 = new Child(); + child1.setFirstName("Jesse"); + child1.setFamilyName("Merriam"); + child1.setGrade(8); + + Pet pet1 = new Pet(); + pet1.setGivenName("Goofy"); + + Pet pet2 = new Pet(); + pet2.setGivenName("Shadow"); + + child1.setPets(new Pet[] { pet1, pet2 }); + + Child child2 = new Child(); + child2.setFirstName("Lisa"); + child2.setFamilyName("Miller"); + child2.setGrade(1); + child2.setGender("female"); + + wakefieldFamily.setChildren(new 
Child[] { child1, child2 }); + + Address address = new Address(); + address.setCity("NY"); + address.setCounty("Manhattan"); + address.setState("NY"); + + wakefieldFamily.setAddress(address); + wakefieldFamily.setDistrict("NY23"); + wakefieldFamily.setRegistered(true); + return wakefieldFamily; + } + + public static Family getJohnsonFamilyItem() { + Family andersenFamily = new Family(); + andersenFamily.setId("Johnson-" + System.currentTimeMillis()); + andersenFamily.setLastName("Johnson"); + + Parent parent1 = new Parent(); + parent1.setFirstName("John"); + + Parent parent2 = new Parent(); + parent2.setFirstName("Lili"); + + return andersenFamily; + } + + public static Family getSmithFamilyItem() { + Family andersenFamily = new Family(); + andersenFamily.setId("Smith-" + System.currentTimeMillis()); + andersenFamily.setLastName("Smith"); + + Parent parent1 = new Parent(); + parent1.setFirstName("John"); + + Parent parent2 = new Parent(); + parent2.setFirstName("Cynthia"); + + return andersenFamily; + } +} diff --git a/src/main/java/com/azure/cosmos/examples/common/Family.java b/src/main/java/com/azure/cosmos/examples/common/Family.java new file mode 100644 index 0000000..4e337b3 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Family.java @@ -0,0 +1,74 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.examples.common; + +public class Family { + public Family() { + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getLastName() { + return lastName; + } + + public void setLastName(String lastName) { + this.lastName = lastName; + } + + public String getDistrict() { + return district; + } + + public void setDistrict(String district) { + this.district = district; + } + + public Parent[] getParents() { + return parents; + } + + public void setParents(Parent[] parents) { + this.parents = parents; + } + + public Child[] getChildren() { + return children; + } + + public void setChildren(Child[] children) { + this.children = children; + } + + public Address getAddress() { + return address; + } + + public void setAddress(Address address) { + this.address = address; + } + + public boolean isRegistered() { + return isRegistered; + } + + public void setRegistered(boolean isRegistered) { + this.isRegistered = isRegistered; + } + + private String id; + private String lastName; + private String district; + private Parent[] parents; + private Child[] children; + private Address address; + private boolean isRegistered; +} + diff --git a/src/main/java/com/azure/cosmos/examples/common/Parent.java b/src/main/java/com/azure/cosmos/examples/common/Parent.java new file mode 100644 index 0000000..d7753a8 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Parent.java @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.examples.common; + +public class Parent { + + public Parent() { + } + + public Parent(String firstName) { + this.firstName = firstName; + } + + public String getFamilyName() { + return familyName; + } + + public void setFamilyName(String familyName) { + this.familyName = familyName; + } + + public String getFirstName() { + return firstName; + } + + public void setFirstName(String firstName) { + this.firstName = firstName; + } + + private String familyName; + private String firstName; +} diff --git a/src/main/java/com/azure/cosmos/examples/common/Pet.java b/src/main/java/com/azure/cosmos/examples/common/Pet.java new file mode 100644 index 0000000..062ce93 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Pet.java @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.common; + +public class Pet { + public String getGivenName() { + return givenName; + } + + public void setGivenName(String givenName) { + this.givenName = givenName; + } + + private String givenName; +} diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java new file mode 100644 index 0000000..bb1637e --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java @@ -0,0 +1,287 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package com.azure.cosmos.examples.crudquickstart.async;

import com.azure.cosmos.*;
import com.azure.cosmos.examples.common.AccountSettings;
import com.azure.cosmos.examples.common.Families;
import com.azure.cosmos.examples.common.Family;
import com.google.common.collect.Lists;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

import java.time.Duration;
import java.util.concurrent.CountDownLatch;
import java.util.stream.Collectors;

/**
 * Quickstart for the Azure Cosmos DB async (Reactor-based) client: creates a
 * database and a container, then creates, point-reads and queries
 * {@link Family} items. Each reactive pipeline blocks on a latch so the demo
 * steps run sequentially.
 */
public class SampleCRUDQuickstartAsync {

    private CosmosAsyncClient client;

    private final String databaseName = "AzureSampleFamilyDB";
    private final String containerName = "FamilyContainer";

    private CosmosAsyncDatabase database;
    private CosmosAsyncContainer container;

    // FIX: the logger was previously created from
    // SampleChangeFeedProcessor.class (copy/paste error), attributing all of
    // this sample's output to the wrong class.
    protected static Logger logger = LoggerFactory.getLogger(SampleCRUDQuickstartAsync.class);

    /** Releases the underlying client and its connections. */
    public void close() {
        client.close();
    }

    /**
     * Run a Hello CosmosDB console application.
     *
     * @param args command line args.
     */
    public static void main(String[] args) {
        SampleCRUDQuickstartAsync p = new SampleCRUDQuickstartAsync();

        try {
            logger.info("Starting ASYNC main");
            p.getStartedDemo();
            logger.info("Demo complete, please hold while resources are released");
        } catch (Exception e) {
            e.printStackTrace();
            logger.error(String.format("Cosmos getStarted failed with %s", e));
        } finally {
            logger.info("Closing the client");
            p.close();
        }
    }

    // Builds the async client, provisions the database and container, then
    // runs the create / point-read / query demo steps in order.
    private void getStartedDemo() throws Exception {
        logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST);

        ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy();
        // West US is just an example; set the preferred location to the
        // Cosmos DB region closest to the application.
        defaultPolicy.setPreferredLocations(Lists.newArrayList("West US"));

        // Create async client
        client = new CosmosClientBuilder()
                .setEndpoint(AccountSettings.HOST)
                .setKey(AccountSettings.MASTER_KEY)
                .setConnectionPolicy(defaultPolicy)
                .setConsistencyLevel(ConsistencyLevel.EVENTUAL)
                .buildAsyncClient();

        createDatabaseIfNotExists();
        createContainerIfNotExists();

        Family andersenFamilyItem = Families.getAndersenFamilyItem();
        Family wakefieldFamilyItem = Families.getWakefieldFamilyItem();
        Family johnsonFamilyItem = Families.getJohnsonFamilyItem();
        Family smithFamilyItem = Families.getSmithFamilyItem();

        // Setup family items to create
        Flux<Family> familiesToCreate = Flux.just(andersenFamilyItem,
                wakefieldFamilyItem,
                johnsonFamilyItem,
                smithFamilyItem);

        createFamilies(familiesToCreate);

        // A Flux is single-use; rebuild it for the read pass.
        familiesToCreate = Flux.just(andersenFamilyItem,
                wakefieldFamilyItem,
                johnsonFamilyItem,
                smithFamilyItem);

        logger.info("Reading items.");
        readItems(familiesToCreate);

        logger.info("Querying items.");
        queryItems();
    }

    // Creates the sample database if missing; blocks until the operation
    // completes so subsequent steps can rely on 'database' being set.
    private void createDatabaseIfNotExists() throws Exception {
        logger.info("Create database " + databaseName + " if not exists.");

        client.createDatabaseIfNotExists(databaseName)
                .flatMap(databaseResponse -> {
                    database = databaseResponse.getDatabase();
                    logger.info("Checking database " + database.getId() + " completed!\n");
                    return Mono.empty();
                }).block();
    }

    // Creates the container (partitioned on /lastName, 400 RU/s) if missing;
    // blocks until the operation completes.
    private void createContainerIfNotExists() throws Exception {
        logger.info("Create container " + containerName + " if not exists.");

        CosmosContainerProperties containerProperties =
                new CosmosContainerProperties(containerName, "/lastName");

        // Create container with 400 RU/s
        database.createContainerIfNotExists(containerProperties, 400)
                .flatMap(containerResponse -> {
                    container = containerResponse.getContainer();
                    logger.info("Checking container " + container.getId() + " completed!\n");
                    return Mono.empty();
                }).block();
    }

    /**
     * Inserts the given families and logs the per-item and aggregate request
     * charge. Blocks on a latch until the pipeline completes or errors.
     */
    private void createFamilies(Flux<Family> families) throws Exception {
        final CountDownLatch completionLatch = new CountDownLatch(1);

        // Combine item inserts, per-item logging, and an aggregate charge
        // total into one reactive pipeline.
        families.flatMap(family -> container.createItem(family)) // Flux of item request responses
                .flatMap(itemResponse -> {
                    logger.info(String.format("Created item with request charge of %.2f within" +
                                    " duration %s",
                            itemResponse.getRequestCharge(), itemResponse.getRequestLatency()));
                    logger.info(String.format("Item ID: %s\n", itemResponse.getResource().getId()));
                    return Mono.just(itemResponse.getRequestCharge());
                }) // Flux of request charges
                .reduce(0.0,
                        (charge_n, charge_nplus1) -> charge_n + charge_nplus1
                ) // Mono of total charge - there will be only one element in this stream
                .subscribe(charge -> {
                            logger.info(String.format("Created items with total request charge of %.2f\n",
                                    charge));
                        },
                        err -> {
                            if (err instanceof CosmosClientException) {
                                // Client-specific errors
                                CosmosClientException cerr = (CosmosClientException) err;
                                cerr.printStackTrace();
                                // FIX: message previously said "Read Item failed" in this create path.
                                logger.error(String.format("Create Item failed with %s\n", cerr));
                            } else {
                                // General errors
                                err.printStackTrace();
                            }

                            completionLatch.countDown();
                        },
                        () -> completionLatch.countDown()
                );

        try {
            completionLatch.await();
        } catch (InterruptedException err) {
            throw new AssertionError("Unexpected Interruption", err);
        }
    }

    /**
     * Point-reads each family by id + partition key (lastName) — the fastest
     * lookup path. Blocks on a latch until the pipeline completes or errors.
     */
    private void readItems(Flux<Family> familiesToCreate) {
        final CountDownLatch completionLatch = new CountDownLatch(1);

        familiesToCreate
                .flatMap(family -> container.readItem(family.getId(),
                        new PartitionKey(family.getLastName()), Family.class))
                .subscribe(
                        itemResponse -> {
                            double requestCharge = itemResponse.getRequestCharge();
                            Duration requestLatency = itemResponse.getRequestLatency();
                            logger.info(String.format(
                                    "Item successfully read with id %s with a charge of %.2f and within duration %s",
                                    itemResponse.getResource().getId(), requestCharge, requestLatency));
                        },
                        err -> {
                            if (err instanceof CosmosClientException) {
                                // Client-specific errors
                                CosmosClientException cerr = (CosmosClientException) err;
                                cerr.printStackTrace();
                                logger.error(String.format("Read Item failed with %s\n", cerr));
                            } else {
                                // General errors
                                err.printStackTrace();
                            }

                            completionLatch.countDown();
                        },
                        () -> completionLatch.countDown()
                );

        try {
            completionLatch.await();
        } catch (InterruptedException err) {
            throw new AssertionError("Unexpected Interruption", err);
        }
    }

    /** Runs a cross-partition SQL query and logs each page's items and charge. */
    private void queryItems() {
        // Set some common query options
        FeedOptions queryOptions = new FeedOptions();
        queryOptions.maxItemCount(10);
        // Cross-partition query is enabled by default in SDK v4.
        // Set populate query metrics to get metrics around query executions
        queryOptions.populateQueryMetrics(true);

        CosmosContinuablePagedFlux<Family> pagedFluxResponse = container.queryItems(
                "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')",
                queryOptions, Family.class);

        final CountDownLatch completionLatch = new CountDownLatch(1);

        pagedFluxResponse.byPage().subscribe(
                fluxResponse -> {
                    logger.info("Got a page of query result with " +
                            fluxResponse.getResults().size() + " items(s)" +
                            " and request charge of " + fluxResponse.getRequestCharge());

                    logger.info("Item Ids " + fluxResponse
                            .getResults()
                            .stream()
                            .map(Family::getId)
                            .collect(Collectors.toList()));
                },
                err -> {
                    if (err instanceof CosmosClientException) {
                        // Client-specific errors
                        CosmosClientException cerr = (CosmosClientException) err;
                        cerr.printStackTrace();
                        // FIX: message previously said "Read Item failed" in this query path.
                        logger.error(String.format("Query Items failed with %s\n", cerr));
                    } else {
                        // General errors
                        err.printStackTrace();
                    }

                    completionLatch.countDown();
                },
                () -> completionLatch.countDown()
        );

        try {
            completionLatch.await();
        } catch (InterruptedException err) {
            throw new AssertionError("Unexpected Interruption", err);
        }
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package com.azure.cosmos.examples.crudquickstart.sync;

import com.azure.cosmos.ConnectionPolicy;
import com.azure.cosmos.ConsistencyLevel;
import com.azure.cosmos.CosmosClient;
import com.azure.cosmos.CosmosClientBuilder;
import com.azure.cosmos.CosmosClientException;
import com.azure.cosmos.CosmosContainer;
import com.azure.cosmos.CosmosContainerProperties;
import com.azure.cosmos.CosmosContinuablePagedIterable;
import com.azure.cosmos.CosmosDatabase;
import com.azure.cosmos.CosmosItemRequestOptions;
import com.azure.cosmos.CosmosItemResponse;
import com.azure.cosmos.FeedOptions;
import com.azure.cosmos.PartitionKey;
import com.azure.cosmos.examples.common.AccountSettings;
import com.azure.cosmos.examples.common.Families;
import com.azure.cosmos.examples.common.Family;
import com.google.common.collect.Lists;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Quickstart for the Azure Cosmos DB sync client: creates a database and a
 * container, then creates, point-reads and queries {@link Family} items.
 */
public class SampleCRUDQuickstart {

    private CosmosClient client;

    private final String databaseName = "AzureSampleFamilyDB";
    private final String containerName = "FamilyContainer";

    private CosmosDatabase database;
    private CosmosContainer container;

    // FIX: the logger was previously created from
    // SampleChangeFeedProcessor.class (copy/paste error), attributing all of
    // this sample's output to the wrong class.
    protected static Logger logger = LoggerFactory.getLogger(SampleCRUDQuickstart.class);

    /** Releases the underlying client and its connections. */
    public void close() {
        client.close();
    }

    /**
     * Run a Hello CosmosDB console application.
     *
     * @param args command line args.
     */
    public static void main(String[] args) {
        SampleCRUDQuickstart p = new SampleCRUDQuickstart();

        try {
            logger.info("Starting SYNC main");
            p.getStartedDemo();
            logger.info("Demo complete, please hold while resources are released");
        } catch (Exception e) {
            e.printStackTrace();
            logger.error(String.format("Cosmos getStarted failed with %s", e));
        } finally {
            logger.info("Closing the client");
            p.close();
        }
    }

    // Builds the sync client, provisions the database and container, then
    // runs the create / point-read / query demo steps in order.
    private void getStartedDemo() throws Exception {
        logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST);

        ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy();
        // West US is just an example; set the preferred location to the
        // Cosmos DB region closest to the application.
        defaultPolicy.setPreferredLocations(Lists.newArrayList("West US"));

        // Create sync client
        client = new CosmosClientBuilder()
                .setEndpoint(AccountSettings.HOST)
                .setKey(AccountSettings.MASTER_KEY)
                .setConnectionPolicy(defaultPolicy)
                .setConsistencyLevel(ConsistencyLevel.EVENTUAL)
                .buildClient();

        createDatabaseIfNotExists();
        createContainerIfNotExists();

        // Setup family items to create
        ArrayList<Family> familiesToCreate = new ArrayList<>();
        familiesToCreate.add(Families.getAndersenFamilyItem());
        familiesToCreate.add(Families.getWakefieldFamilyItem());
        familiesToCreate.add(Families.getJohnsonFamilyItem());
        familiesToCreate.add(Families.getSmithFamilyItem());

        createFamilies(familiesToCreate);

        logger.info("Reading items.");
        readItems(familiesToCreate);

        logger.info("Querying items.");
        queryItems();
    }

    // Creates the sample database if it does not already exist.
    private void createDatabaseIfNotExists() throws Exception {
        logger.info("Create database " + databaseName + " if not exists.");

        database = client.createDatabaseIfNotExists(databaseName).getDatabase();

        logger.info("Checking database " + database.getId() + " completed!\n");
    }

    // Creates the container (partitioned on /lastName, 400 RU/s) if missing.
    private void createContainerIfNotExists() throws Exception {
        logger.info("Create container " + containerName + " if not exists.");

        CosmosContainerProperties containerProperties =
                new CosmosContainerProperties(containerName, "/lastName");

        // Create container with 400 RU/s
        container = database.createContainerIfNotExists(containerProperties, 400).getContainer();

        logger.info("Checking container " + container.getId() + " completed!\n");
    }

    /**
     * Inserts the given families one by one and logs the per-item and total
     * request charge.
     */
    private void createFamilies(List<Family> families) throws Exception {
        double totalRequestCharge = 0;
        for (Family family : families) {
            // lastName is the container's partition key; supplying it lets the
            // SDK route the write directly to the owning partition.
            CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions();
            CosmosItemResponse<Family> item =
                    container.createItem(family, new PartitionKey(family.getLastName()), cosmosItemRequestOptions);

            // Get request charge and other properties like latency and diagnostics strings.
            logger.info(String.format("Created item with request charge of %.2f within" +
                            " duration %s",
                    item.getRequestCharge(), item.getRequestLatency()));
            totalRequestCharge += item.getRequestCharge();
        }
        logger.info(String.format("Created %d items with total request " +
                        "charge of %.2f",
                families.size(),
                totalRequestCharge));
    }

    /**
     * Point-reads each family by id + partition key (lastName) — the fastest
     * lookup path.
     */
    private void readItems(ArrayList<Family> familiesToCreate) {
        familiesToCreate.forEach(family -> {
            try {
                CosmosItemResponse<Family> item =
                        container.readItem(family.getId(), new PartitionKey(family.getLastName()), Family.class);
                double requestCharge = item.getRequestCharge();
                Duration requestLatency = item.getRequestLatency();
                logger.info(String.format(
                        "Item successfully read with id %s with a charge of %.2f and within duration %s",
                        item.getResource().getId(), requestCharge, requestLatency));
            } catch (CosmosClientException e) {
                e.printStackTrace();
                logger.error(String.format("Read Item failed with %s", e));
            }
        });
    }

    /** Runs a cross-partition SQL query and logs each page's items and charge. */
    private void queryItems() {
        // Set some common query options
        FeedOptions queryOptions = new FeedOptions();
        queryOptions.maxItemCount(10);
        // Cross-partition query is enabled by default in SDK v4.
        // Set populate query metrics to get metrics around query executions
        queryOptions.populateQueryMetrics(true);

        CosmosContinuablePagedIterable<Family> familiesPagedIterable = container.queryItems(
                "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')",
                queryOptions, Family.class);

        familiesPagedIterable.iterableByPage().forEach(cosmosItemPropertiesFeedResponse -> {
            logger.info("Got a page of query result with " +
                    cosmosItemPropertiesFeedResponse.getResults().size() + " items(s)" +
                    " and request charge of " + cosmosItemPropertiesFeedResponse.getRequestCharge());

            logger.info("Item Ids " + cosmosItemPropertiesFeedResponse
                    .getResults()
                    .stream()
                    .map(Family::getId)
                    .collect(Collectors.toList()));
        });
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package com.azure.cosmos.examples.indexmanagement.async;

import com.azure.cosmos.*;
import com.azure.cosmos.examples.common.AccountSettings;
import com.azure.cosmos.examples.common.Families;
import com.azure.cosmos.examples.common.Family;
import com.google.common.collect.Lists;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.stream.Collectors;

/**
 * Index-management sample for the Azure Cosmos DB async client: like the CRUD
 * quickstart, but the container is created with a custom indexing policy
 * (consistent indexing, include-all paths minus an excluded path).
 */
public class SampleIndexManagementAsync {

    private CosmosAsyncClient client;

    private final String databaseName = "AzureSampleFamilyDB";
    private final String containerName = "FamilyContainer";

    private CosmosAsyncDatabase database;
    private CosmosAsyncContainer container;

    // FIX: the logger was previously created from
    // SampleChangeFeedProcessor.class (copy/paste error), attributing all of
    // this sample's output to the wrong class.
    protected static Logger logger = LoggerFactory.getLogger(SampleIndexManagementAsync.class);

    /** Releases the underlying client and its connections. */
    public void close() {
        client.close();
    }

    /**
     * Run a Hello CosmosDB console application.
     *
     * @param args command line args.
     */
    public static void main(String[] args) {
        SampleIndexManagementAsync p = new SampleIndexManagementAsync();

        try {
            logger.info("Starting ASYNC main");
            p.getStartedDemo();
            logger.info("Demo complete, please hold while resources are released");
        } catch (Exception e) {
            e.printStackTrace();
            logger.error(String.format("Cosmos getStarted failed with %s", e));
        } finally {
            logger.info("Closing the client");
            p.close();
        }
    }

    // Builds the async client, provisions the database and a custom-indexed
    // container, then runs the create / point-read / query demo steps.
    private void getStartedDemo() throws Exception {
        logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST);

        ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy();
        // West US is just an example; set the preferred location to the
        // Cosmos DB region closest to the application.
        defaultPolicy.setPreferredLocations(Lists.newArrayList("West US"));

        // Create async client
        client = new CosmosClientBuilder()
                .setEndpoint(AccountSettings.HOST)
                .setKey(AccountSettings.MASTER_KEY)
                .setConnectionPolicy(defaultPolicy)
                .setConsistencyLevel(ConsistencyLevel.EVENTUAL)
                .buildAsyncClient();

        createDatabaseIfNotExists();
        createContainerIfNotExistsWithSpecifiedIndex();

        Family andersenFamilyItem = Families.getAndersenFamilyItem();
        Family wakefieldFamilyItem = Families.getWakefieldFamilyItem();
        Family johnsonFamilyItem = Families.getJohnsonFamilyItem();
        Family smithFamilyItem = Families.getSmithFamilyItem();

        // Setup family items to create
        Flux<Family> familiesToCreate = Flux.just(andersenFamilyItem,
                wakefieldFamilyItem,
                johnsonFamilyItem,
                smithFamilyItem);

        createFamilies(familiesToCreate);

        // A Flux is single-use; rebuild it for the read pass.
        familiesToCreate = Flux.just(andersenFamilyItem,
                wakefieldFamilyItem,
                johnsonFamilyItem,
                smithFamilyItem);

        logger.info("Reading items.");
        readItems(familiesToCreate);

        logger.info("Querying items.");
        queryItems();
    }

    // Creates the sample database if missing; blocks until the operation
    // completes so subsequent steps can rely on 'database' being set.
    private void createDatabaseIfNotExists() throws Exception {
        logger.info("Create database " + databaseName + " if not exists.");

        client.createDatabaseIfNotExists(databaseName)
                .flatMap(databaseResponse -> {
                    database = databaseResponse.getDatabase();
                    logger.info("Checking database " + database.getId() + " completed!\n");
                    return Mono.empty();
                }).block();
    }

    /**
     * Creates the container (partitioned on /lastName, 400 RU/s) with an
     * explicit indexing policy: consistent indexing, index everything under
     * /*, but exclude /name/*. Commented sections show how to add spatial and
     * composite indexes if needed.
     */
    private void createContainerIfNotExistsWithSpecifiedIndex() throws Exception {
        logger.info("Create container " + containerName + " if not exists.");

        CosmosContainerProperties containerProperties =
                new CosmosContainerProperties(containerName, "/lastName");

        IndexingPolicy indexingPolicy = new IndexingPolicy();
        indexingPolicy.setIndexingMode(IndexingMode.CONSISTENT); // To turn indexing off set IndexingMode.NONE

        // Included paths
        List<IncludedPath> includedPaths = new ArrayList<>();
        IncludedPath includedPath = new IncludedPath();
        includedPath.setPath("/*");
        includedPaths.add(includedPath);
        indexingPolicy.setIncludedPaths(includedPaths);

        // Excluded paths
        List<ExcludedPath> excludedPaths = new ArrayList<>();
        ExcludedPath excludedPath = new ExcludedPath();
        excludedPath.setPath("/name/*");
        excludedPaths.add(excludedPath);
        indexingPolicy.setExcludedPaths(excludedPaths);

        // Spatial indices - if you need them, here is how to set them up:
        /*
        List<SpatialSpec> spatialIndexes = new ArrayList<>();
        List<SpatialType> collectionOfSpatialTypes = new ArrayList<>();

        SpatialSpec spec = new SpatialSpec();
        spec.setPath("/locations/*");
        collectionOfSpatialTypes.add(SpatialType.Point);
        spec.setSpatialTypes(collectionOfSpatialTypes);
        spatialIndexes.add(spec);

        indexingPolicy.setSpatialIndexes(spatialIndexes);
        */

        // Composite indices - if you need them, here is how to set them up:
        /*
        List<List<CompositePath>> compositeIndexes = new ArrayList<>();
        List<CompositePath> compositePaths = new ArrayList<>();

        CompositePath nameCompositePath = new CompositePath();
        nameCompositePath.setPath("/name");
        nameCompositePath.setOrder(CompositePathSortOrder.ASCENDING);

        CompositePath ageCompositePath = new CompositePath();
        ageCompositePath.setPath("/age");
        ageCompositePath.setOrder(CompositePathSortOrder.DESCENDING);

        compositePaths.add(ageCompositePath);
        compositePaths.add(nameCompositePath);

        compositeIndexes.add(compositePaths);
        indexingPolicy.setCompositeIndexes(compositeIndexes);
        */

        containerProperties.setIndexingPolicy(indexingPolicy);

        // Create container with 400 RU/s
        database.createContainerIfNotExists(containerProperties, 400)
                .flatMap(containerResponse -> {
                    container = containerResponse.getContainer();
                    logger.info("Checking container " + container.getId() + " completed!\n");
                    return Mono.empty();
                }).block();
    }

    /**
     * Inserts the given families and logs the per-item and aggregate request
     * charge. Blocks on a latch until the pipeline completes or errors.
     */
    private void createFamilies(Flux<Family> families) throws Exception {
        final CountDownLatch completionLatch = new CountDownLatch(1);

        // Combine item inserts, per-item logging, and an aggregate charge
        // total into one reactive pipeline.
        families.flatMap(family -> container.createItem(family)) // Flux of item request responses
                .flatMap(itemResponse -> {
                    logger.info(String.format("Created item with request charge of %.2f within" +
                                    " duration %s",
                            itemResponse.getRequestCharge(), itemResponse.getRequestLatency()));
                    logger.info(String.format("Item ID: %s\n", itemResponse.getResource().getId()));
                    return Mono.just(itemResponse.getRequestCharge());
                }) // Flux of request charges
                .reduce(0.0,
                        (charge_n, charge_nplus1) -> charge_n + charge_nplus1
                ) // Mono of total charge - there will be only one element in this stream
                .subscribe(charge -> {
                            logger.info(String.format("Created items with total request charge of %.2f\n",
                                    charge));
                        },
                        err -> {
                            if (err instanceof CosmosClientException) {
                                // Client-specific errors
                                CosmosClientException cerr = (CosmosClientException) err;
                                cerr.printStackTrace();
                                // FIX: was logger.info with a "Read Item failed" message;
                                // failures in a create path now log at error level.
                                logger.error(String.format("Create Item failed with %s\n", cerr));
                            } else {
                                // General errors
                                err.printStackTrace();
                            }

                            completionLatch.countDown();
                        },
                        () -> completionLatch.countDown()
                );

        try {
            completionLatch.await();
        } catch (InterruptedException err) {
            throw new AssertionError("Unexpected Interruption", err);
        }
    }

    /**
     * Point-reads each family by id + partition key (lastName) — the fastest
     * lookup path. Blocks on a latch until the pipeline completes or errors.
     */
    private void readItems(Flux<Family> familiesToCreate) {
        final CountDownLatch completionLatch = new CountDownLatch(1);

        familiesToCreate
                .flatMap(family -> container.readItem(family.getId(),
                        new PartitionKey(family.getLastName()), Family.class))
                .subscribe(
                        itemResponse -> {
                            double requestCharge = itemResponse.getRequestCharge();
                            Duration requestLatency = itemResponse.getRequestLatency();
                            logger.info(String.format(
                                    "Item successfully read with id %s with a charge of %.2f and within duration %s",
                                    itemResponse.getResource().getId(), requestCharge, requestLatency));
                        },
                        err -> {
                            if (err instanceof CosmosClientException) {
                                // Client-specific errors
                                CosmosClientException cerr = (CosmosClientException) err;
                                cerr.printStackTrace();
                                // FIX: was logger.info; failures now log at error level.
                                logger.error(String.format("Read Item failed with %s\n", cerr));
                            } else {
                                // General errors
                                err.printStackTrace();
                            }

                            completionLatch.countDown();
                        },
                        () -> completionLatch.countDown()
                );

        try {
            completionLatch.await();
        } catch (InterruptedException err) {
            throw new AssertionError("Unexpected Interruption", err);
        }
    }

    /** Runs a cross-partition SQL query and logs each page's items and charge. */
    private void queryItems() {
        // Set some common query options
        FeedOptions queryOptions = new FeedOptions();
        queryOptions.maxItemCount(10);
        // Set populate query metrics to get metrics around query executions
        queryOptions.populateQueryMetrics(true);

        CosmosContinuablePagedFlux<Family> pagedFluxResponse = container.queryItems(
                "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')",
                queryOptions, Family.class);

        final CountDownLatch completionLatch = new CountDownLatch(1);

        pagedFluxResponse.byPage().subscribe(
                fluxResponse -> {
                    logger.info("Got a page of query result with " +
                            fluxResponse.getResults().size() + " items(s)" +
                            " and request charge of " + fluxResponse.getRequestCharge());

                    logger.info("Item Ids " + fluxResponse
                            .getResults()
                            .stream()
                            .map(Family::getId)
                            .collect(Collectors.toList()));
                },
                err -> {
                    if (err instanceof CosmosClientException) {
                        // Client-specific errors
                        CosmosClientException cerr = (CosmosClientException) err;
                        cerr.printStackTrace();
                        // FIX: message previously said "Read Item failed" in this query path.
                        logger.error(String.format("Query Items failed with %s\n", cerr));
                    } else {
                        // General errors
                        err.printStackTrace();
                    }

                    completionLatch.countDown();
                },
                () -> completionLatch.countDown()
        );

        try {
            completionLatch.await();
        } catch (InterruptedException err) {
            throw new AssertionError("Unexpected Interruption", err);
        }
    }
}
+ +package com.azure.cosmos.examples.indexmanagement.sync; + +import com.azure.cosmos.*; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.Families; +import com.azure.cosmos.examples.common.Family; +import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.stream.Collectors; + +public class SampleIndexManagement { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + + private CosmosDatabase database; + private CosmosContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Run a Hello CosmosDB console application. + * + * @param args command line args. + */ + //
+ public static void main(String[] args) { + SampleIndexManagement p = new SampleIndexManagement(); + + try { + logger.info("Starting SYNC main"); + p.getStartedDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.close(); + } + } + + //
+ + private void getStartedDemo() throws Exception { + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); + // Setting the preferred location to Cosmos DB Account region + // West US is just an example. User should set preferred location to the Cosmos DB region closest to the application + defaultPolicy.setPreferredLocations(Lists.newArrayList("West US")); + + // Create sync client + // + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(defaultPolicy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + // + + createDatabaseIfNotExists(); + createContainerIfNotExistsWithSpecifiedIndex(); + + + + // Setup family items to create + ArrayList familiesToCreate = new ArrayList<>(); + familiesToCreate.add(Families.getAndersenFamilyItem()); + familiesToCreate.add(Families.getWakefieldFamilyItem()); + familiesToCreate.add(Families.getJohnsonFamilyItem()); + familiesToCreate.add(Families.getSmithFamilyItem()); + + createFamilies(familiesToCreate); + + logger.info("Reading items."); + readItems(familiesToCreate); + + logger.info("Querying items."); + queryItems(); + } + + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists."); + + // Create database if not exists + // + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + // + + logger.info("Checking database " + database.getId() + " completed!\n"); + } + + private void createContainerIfNotExistsWithSpecifiedIndex() throws Exception { + logger.info("Create container " + containerName + " if not exists."); + + // Create container if not exists + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/lastName"); + + // + IndexingPolicy indexingPolicy = new IndexingPolicy(); + 
indexingPolicy.setIndexingMode(IndexingMode.CONSISTENT); //To turn indexing off set IndexingMode.NONE + + // Included paths + List includedPaths = new ArrayList<>(); + IncludedPath includedPath = new IncludedPath(); + includedPath.setPath("/*"); + includedPaths.add(includedPath); + indexingPolicy.setIncludedPaths(includedPaths); + + // Excluded paths + List excludedPaths = new ArrayList<>(); + ExcludedPath excludedPath = new ExcludedPath(); + excludedPath.setPath("/name/*"); + excludedPaths.add(excludedPath); + indexingPolicy.setExcludedPaths(excludedPaths); + + // Spatial indices - if you need them, here is how to set them up: + /* + List spatialIndexes = new ArrayList(); + List collectionOfSpatialTypes = new ArrayList(); + + SpatialSpec spec = new SpatialSpec(); + spec.setPath("/locations/*"); + collectionOfSpatialTypes.add(SpatialType.Point); + spec.setSpatialTypes(collectionOfSpatialTypes); + spatialIndexes.add(spec); + + indexingPolicy.setSpatialIndexes(spatialIndexes); + */ + + // Composite indices - if you need them, here is how to set them up: + /* + List> compositeIndexes = new ArrayList<>(); + List compositePaths = new ArrayList<>(); + + CompositePath nameCompositePath = new CompositePath(); + nameCompositePath.setPath("/name"); + nameCompositePath.setOrder(CompositePathSortOrder.ASCENDING); + + CompositePath ageCompositePath = new CompositePath(); + ageCompositePath.setPath("/age"); + ageCompositePath.setOrder(CompositePathSortOrder.DESCENDING); + + compositePaths.add(ageCompositePath); + compositePaths.add(nameCompositePath); + + compositeIndexes.add(compositePaths); + indexingPolicy.setCompositeIndexes(compositeIndexes); + */ + + containerProperties.setIndexingPolicy(indexingPolicy); + + // + + // Create container with 400 RU/s + container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); + + logger.info("Checking container " + container.getId() + " completed!\n"); + } + + private void createFamilies(List families) throws 
Exception { + double totalRequestCharge = 0; + for (Family family : families) { + + // + // Create item using container that we created using sync client + + // Use lastName as partitionKey for cosmos item + // Using appropriate partition key improves the performance of database operations + CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); + CosmosItemResponse item = container.createItem(family, new PartitionKey(family.getLastName()), cosmosItemRequestOptions); + // + + // Get request charge and other properties like latency, and diagnostics strings, etc. + logger.info(String.format("Created item with request charge of %.2f within" + + " duration %s", + item.getRequestCharge(), item.getRequestLatency())); + totalRequestCharge += item.getRequestCharge(); + } + logger.info(String.format("Created %d items with total request " + + "charge of %.2f", + families.size(), + totalRequestCharge)); + } + + private void readItems(ArrayList familiesToCreate) { + // Using partition key for point read scenarios. 
+ // This will help fast look up of items because of partition key + familiesToCreate.forEach(family -> { + // + try { + CosmosItemResponse item = container.readItem(family.getId(), new PartitionKey(family.getLastName()), Family.class); + double requestCharge = item.getRequestCharge(); + Duration requestLatency = item.getRequestLatency(); + System.out.println(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", + item.getResource().getId(), requestCharge, requestLatency)); + } catch (CosmosClientException e) { + e.printStackTrace(); + System.err.println(String.format("Read Item failed with %s", e)); + } + // + }); + } + + private void queryItems() { + // + // Set some common query options + FeedOptions queryOptions = new FeedOptions(); + queryOptions.maxItemCount(10); + // Set populate query metrics to get metrics around query executions + queryOptions.populateQueryMetrics(true); + + CosmosContinuablePagedIterable familiesPagedIterable = container.queryItems( + "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); + + familiesPagedIterable.iterableByPage().forEach(cosmosItemPropertiesFeedResponse -> { + logger.info("Got a page of query result with " + + cosmosItemPropertiesFeedResponse.getResults().size() + " items(s)" + + " and request charge of " + cosmosItemPropertiesFeedResponse.getRequestCharge()); + + logger.info("Item Ids " + cosmosItemPropertiesFeedResponse + .getResults() + .stream() + .map(Family::getId) + .collect(Collectors.toList())); + }); + // + } +} diff --git a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java new file mode 100644 index 0000000..69013de --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java @@ -0,0 +1,181 @@ +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + + + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.storedprocedure.async; + +import com.azure.cosmos.*; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.util.UUID; +import java.util.concurrent.CountDownLatch; + +public class SampleStoredProcedureAsync { + + private CosmosAsyncClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + + private CosmosAsyncDatabase database; + private CosmosAsyncContainer container; + + private String sprocId; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Run a Hello CosmosDB console application. + * + * @param args command line args. + */ + //
+ public static void main(String[] args) { + SampleStoredProcedureAsync p = new SampleStoredProcedureAsync(); + + try { + p.sprocDemo(); + logger.info("Demo complete, please hold while resources are released"); + p.shutdown(); + logger.info("Done.\n"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + p.close(); + } finally { + } + } + + //
+ + private void sprocDemo() throws Exception { + //Setup client, DB + setUp(); + + //Create, list and execute stored procedure + createStoredProcedure(); + readAllSprocs(); + executeStoredProcedure(); + } + + public void setUp() throws Exception{ + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); + // Setting the preferred location to Cosmos DB Account region + // West US is just an example. User should set preferred location to the Cosmos DB region closest to the application + defaultPolicy.setPreferredLocations(Lists.newArrayList("West US")); + + // Create sync client + // + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(defaultPolicy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildAsyncClient(); + + logger.info("Create database " + databaseName + " with container " + containerName + " if either does not already exist.\n"); + + client.createDatabaseIfNotExists(databaseName).flatMap(databaseResponse -> { + database = databaseResponse.getDatabase(); + return Mono.empty(); + }).block(); + + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/lastName"); + database.createContainerIfNotExists(containerProperties, 400).flatMap(containerResponse -> { + container = containerResponse.getContainer(); + return Mono.empty(); + }).block(); + } + + public void shutdown() throws Exception { + //Safe clean & close + deleteStoredProcedure(); + } + + public void createStoredProcedure() throws Exception { + logger.info("Creating stored procedure...\n"); + + sprocId = UUID.randomUUID().toString(); + CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId,"function() {var x = 11;}"); + container.getScripts() + .createStoredProcedure(storedProcedureDef, + new CosmosStoredProcedureRequestOptions()).block(); + 
} + + private void readAllSprocs() throws Exception { + + FeedOptions feedOptions = new FeedOptions(); + CosmosContinuablePagedFlux fluxResponse = + container.getScripts().readAllStoredProcedures(feedOptions); + + final CountDownLatch completionLatch = new CountDownLatch(1); + + fluxResponse.flatMap(storedProcedureProperties -> { + logger.info(String.format("Stored Procedure: %s\n",storedProcedureProperties.getId())); + return Mono.empty(); + }).subscribe( + s -> {}, + err -> { + if (err instanceof CosmosClientException) { + //Client-specific errors + CosmosClientException cerr = (CosmosClientException)err; + cerr.printStackTrace(); + logger.error(String.format("Read Item failed with %s\n", cerr)); + } else { + //General errors + err.printStackTrace(); + } + + completionLatch.countDown(); + }, + () -> {completionLatch.countDown();} + ); + + completionLatch.await(); + } + + public void executeStoredProcedure() throws Exception { + logger.info(String.format("Executing stored procedure %s...\n\n",sprocId)); + + CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions(); + options.setPartitionKey(PartitionKey.NONE); + + container.getScripts() + .getStoredProcedure(sprocId) + .execute(null, options) + .flatMap(executeResponse -> { + logger.info(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", + sprocId, + executeResponse.getResponseAsString(), + executeResponse.getStatusCode(), + //executeResponse.getRequestLatency().toString(), + executeResponse.getRequestCharge())); + return Mono.empty(); + }).block(); + } + + public void deleteStoredProcedure() throws Exception { + logger.info("-Deleting stored procedure...\n"); + container.getScripts() + .getStoredProcedure(sprocId) + .delete().block(); + logger.info("-Closing client instance...\n"); + client.close(); + } +} diff --git a/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java 
b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java new file mode 100644 index 0000000..193504f --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.storedprocedure.sync; + +import com.azure.cosmos.*; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.UUID; +import java.util.Iterator; + +public class SampleStoredProcedure { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + + private CosmosDatabase database; + private CosmosContainer container; + + private String sprocId; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Run a Hello CosmosDB console application. + * + * @param args command line args. + */ + //
+ public static void main(String[] args) { + SampleStoredProcedure p = new SampleStoredProcedure(); + + try { + p.sprocDemo(); + logger.info("Demo complete, please hold while resources are released"); + p.shutdown(); + logger.info("Done.\n"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + p.close(); + } finally { + } + } + + //
+ + private void sprocDemo() throws Exception { + //Setup client, DB + setUp(); + + //Create, list and execute stored procedure + createStoredProcedure(); + readAllSprocs(); + executeStoredProcedure(); + } + + public void setUp() throws Exception{ + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); + // Setting the preferred location to Cosmos DB Account region + // West US is just an example. User should set preferred location to the Cosmos DB region closest to the application + defaultPolicy.setPreferredLocations(Lists.newArrayList("West US")); + + // Create sync client + // + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(defaultPolicy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + logger.info("Create database " + databaseName + " with container " + containerName + " if either does not already exist.\n"); + + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/lastName"); + container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); + } + + public void shutdown() throws Exception { + //Safe clean & close + deleteStoredProcedure(); + } + + public void createStoredProcedure() throws Exception { + logger.info("Creating stored procedure...\n"); + + sprocId = UUID.randomUUID().toString(); + CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId,"function() {var x = 11;}"); + container.getScripts() + .createStoredProcedure(storedProcedureDef, + new CosmosStoredProcedureRequestOptions()); + } + + private void readAllSprocs() throws Exception { + + FeedOptions feedOptions = new FeedOptions(); + CosmosContinuablePagedIterable feedResponseIterable = + 
container.getScripts().readAllStoredProcedures(feedOptions); + + Iterator feedResponseIterator = feedResponseIterable.iterator(); + + while(feedResponseIterator.hasNext()) { + CosmosStoredProcedureProperties storedProcedureProperties = feedResponseIterator.next(); + logger.info(String.format("Stored Procedure: %s\n",storedProcedureProperties)); + } + logger.info("\n"); + } + + public void executeStoredProcedure() throws Exception { + logger.info(String.format("Executing stored procedure %s...\n\n",sprocId)); + + CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions(); + options.setPartitionKey(PartitionKey.NONE); + CosmosStoredProcedureResponse executeResponse = container.getScripts() + .getStoredProcedure(sprocId) + .execute(null, options); + + logger.info(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", + sprocId, + executeResponse.responseAsString(), + executeResponse.getStatusCode(), + //executeResponse.getRequestLatency().toString(), + executeResponse.getRequestCharge())); + } + + public void deleteStoredProcedure() throws Exception { + logger.info("-Deleting stored procedure...\n"); + container.getScripts() + .getStoredProcedure(sprocId) + .delete(); + logger.info("-Closing client instance...\n"); + client.close(); + } +} diff --git a/src/main/java/com/azure/cosmos/rx/examples/multimaster/ConfigurationManager.java b/src/main/java/com/azure/cosmos/rx/examples/multimaster/ConfigurationManager.java new file mode 100644 index 0000000..2c3b676 --- /dev/null +++ b/src/main/java/com/azure/cosmos/rx/examples/multimaster/ConfigurationManager.java @@ -0,0 +1,12 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
package com.azure.cosmos.rx.examples.multimaster;

import java.util.Properties;

/**
 * Thin indirection over configuration lookup: the multi-master samples read
 * their settings from JVM system properties.
 */
public class ConfigurationManager {
    /** Returns the JVM system properties used as the sample's app settings. */
    public static Properties getAppSettings() {
        return System.getProperties();
    }
}

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package com.azure.cosmos.rx.examples.multimaster;

import com.azure.cosmos.implementation.AsyncDocumentClient;
import com.azure.cosmos.CosmosClientException;
import com.azure.cosmos.implementation.Database;
import com.azure.cosmos.implementation.DocumentCollection;
import com.azure.cosmos.implementation.ResourceResponse;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

/**
 * URI helpers plus "create if not exists" helpers built on the low-level
 * AsyncDocumentClient: read the resource first; on a 404, create it.
 */
public class Helpers {

    /** Builds the "/dbs/{db}/colls/{coll}" resource link. */
    public static String createDocumentCollectionUri(String databaseName, String collectionName) {
        return String.format("/dbs/%s/colls/%s", databaseName, collectionName);
    }

    /** Builds the "/dbs/{db}" resource link. */
    public static String createDatabaseUri(String databaseName) {
        return String.format("/dbs/%s", databaseName);
    }

    /** Reads the database; if it does not exist (404), creates it. */
    public static Mono<Database> createDatabaseIfNotExists(AsyncDocumentClient client, String databaseName) {
        return client.readDatabase("/dbs/" + databaseName, null)
                .onErrorResume(e -> {
                    if (isNotFound(e)) {
                        // if doesn't exist create it
                        Database d = new Database();
                        d.setId(databaseName);
                        return client.createDatabase(d, null);
                    }
                    return Mono.error(e);
                })
                .map(ResourceResponse::getResource)
                .single();
    }

    /** Reads the named collection; if it does not exist (404), creates it. */
    public static Mono<DocumentCollection> createCollectionIfNotExists(AsyncDocumentClient client, String databaseName, String collectionName) {
        return client.readCollection(createDocumentCollectionUri(databaseName, collectionName), null)
                .onErrorResume(e -> {
                    if (isNotFound(e)) {
                        // if doesn't exist create it
                        DocumentCollection collection = new DocumentCollection();
                        collection.setId(collectionName);
                        return client.createCollection(createDatabaseUri(databaseName), collection, null);
                    }
                    return Mono.error(e);
                })
                .map(ResourceResponse::getResource)
                .single();
    }

    /**
     * Reads the collection matching the definition's id; if it does not exist
     * (404), creates it using the full definition (indexing/conflict policy etc.).
     */
    public static Mono<DocumentCollection> createCollectionIfNotExists(AsyncDocumentClient client, String databaseName, DocumentCollection collection) {
        return client.readCollection(createDocumentCollectionUri(databaseName, collection.getId()), null)
                .onErrorResume(e -> {
                    if (isNotFound(e)) {
                        // if doesn't exist create it
                        return client.createCollection(createDatabaseUri(databaseName), collection, null);
                    }
                    return Mono.error(e);
                })
                .map(ResourceResponse::getResource)
                .single();
    }

    /** True when the error is a Cosmos "resource not found" (HTTP 404). */
    private static boolean isNotFound(Throwable e) {
        return e instanceof CosmosClientException
                && ((CosmosClientException) e).getStatusCode() == 404;
    }
}

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
+ +package com.azure.cosmos.rx.examples.multimaster.samples; + +import com.azure.cosmos.AccessCondition; +import com.azure.cosmos.AccessConditionType; +import com.azure.cosmos.BridgeInternal; +import com.azure.cosmos.ConflictResolutionPolicy; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.FeedResponse; +import com.azure.cosmos.Resource; +import com.azure.cosmos.implementation.AsyncDocumentClient; +import com.azure.cosmos.implementation.Conflict; +import com.azure.cosmos.implementation.Document; +import com.azure.cosmos.implementation.DocumentCollection; +import com.azure.cosmos.implementation.RequestOptions; +import com.azure.cosmos.implementation.ResourceResponse; +import com.azure.cosmos.implementation.StoredProcedure; +import com.azure.cosmos.rx.examples.multimaster.Helpers; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Scheduler; +import reactor.core.scheduler.Schedulers; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +public class ConflictWorker { + private static Logger logger = LoggerFactory.getLogger(ConflictWorker.class); + + private final Scheduler schedulerForBlockingWork; + private final List clients; + private final String basicCollectionUri; + private final String manualCollectionUri; + private final String lwwCollectionUri; + private final String udpCollectionUri; + private final String databaseName; + private final String basicCollectionName; + private final String manualCollectionName; + private final String lwwCollectionName; + private final String udpCollectionName; + private final ExecutorService executor; + + public ConflictWorker(String databaseName, String 
basicCollectionName, String manualCollectionName, String lwwCollectionName, String udpCollectionName) { + this.clients = new ArrayList<>(); + this.basicCollectionUri = Helpers.createDocumentCollectionUri(databaseName, basicCollectionName); + this.manualCollectionUri = Helpers.createDocumentCollectionUri(databaseName, manualCollectionName); + this.lwwCollectionUri = Helpers.createDocumentCollectionUri(databaseName, lwwCollectionName); + this.udpCollectionUri = Helpers.createDocumentCollectionUri(databaseName, udpCollectionName); + + this.databaseName = databaseName; + this.basicCollectionName = basicCollectionName; + this.manualCollectionName = manualCollectionName; + this.lwwCollectionName = lwwCollectionName; + this.udpCollectionName = udpCollectionName; + + this.executor = Executors.newFixedThreadPool(100); + this.schedulerForBlockingWork = Schedulers.fromExecutor(executor); + } + + public void addClient(AsyncDocumentClient client) { + this.clients.add(client); + } + + private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, DocumentCollection collection) { + return Helpers.createCollectionIfNotExists(createClient, this.databaseName, collection) + .subscribeOn(schedulerForBlockingWork).block(); + } + + private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, String collectionName) { + + return Helpers.createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName) + .subscribeOn(schedulerForBlockingWork).block(); + } + + private DocumentCollection getCollectionDefForManual(String id) { + DocumentCollection collection = new DocumentCollection(); + collection.setId(id); + ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy(); + collection.setConflictResolutionPolicy(policy); + return collection; + } + + private DocumentCollection getCollectionDefForLastWinWrites(String id, String conflictResolutionPath) { + 
DocumentCollection collection = new DocumentCollection(); + collection.setId(id); + ConflictResolutionPolicy policy = ConflictResolutionPolicy.createLastWriterWinsPolicy(conflictResolutionPath); + collection.setConflictResolutionPolicy(policy); + return collection; + } + + private DocumentCollection getCollectionDefForCustom(String id, String storedProc) { + DocumentCollection collection = new DocumentCollection(); + collection.setId(id); + ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy(storedProc); + collection.setConflictResolutionPolicy(policy); + return collection; + } + + public void initialize() throws Exception { + AsyncDocumentClient createClient = this.clients.get(0); + + Helpers.createDatabaseIfNotExists(createClient, this.databaseName).subscribeOn(schedulerForBlockingWork).block(); + + DocumentCollection basic = createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName); + + DocumentCollection manualCollection = createCollectionIfNotExists(createClient, + Helpers.createDatabaseUri(this.databaseName), getCollectionDefForManual(this.manualCollectionName)); + + DocumentCollection lwwCollection = createCollectionIfNotExists(createClient, + Helpers.createDatabaseUri(this.databaseName), getCollectionDefForLastWinWrites(this.lwwCollectionName, "/regionId")); + + DocumentCollection udpCollection = createCollectionIfNotExists(createClient, + Helpers.createDatabaseUri(this.databaseName), getCollectionDefForCustom(this.udpCollectionName, + String.format("dbs/%s/colls/%s/sprocs/%s", this.databaseName, this.udpCollectionName, "resolver"))); + + StoredProcedure lwwSproc = new StoredProcedure(); + lwwSproc.setId("resolver"); + lwwSproc.setBody(IOUtils.toString( + getClass().getClassLoader().getResourceAsStream("resolver-storedproc.txt"), "UTF-8")); + + lwwSproc = + getResource(createClient.upsertStoredProcedure( + Helpers.createDocumentCollectionUri(this.databaseName, this.udpCollectionName), lwwSproc, null)); 
+ + } + + private T getResource(Mono> obs) { + return obs.subscribeOn(schedulerForBlockingWork).single().block().getResource(); + } + + public void runManualConflict() throws Exception { + logger.info("\r\nInsert Conflict\r\n"); + this.runInsertConflictOnManual(); + + logger.info("\r\nUPDATE Conflict\r\n"); + this.runUpdateConflictOnManual(); + + logger.info("\r\nDELETE Conflict\r\n"); + this.runDeleteConflictOnManual(); + } + + public void runLWWConflict() throws Exception { + logger.info("\r\nInsert Conflict\r\n"); + this.runInsertConflictOnLWW(); + + logger.info("\r\nUPDATE Conflict\r\n"); + this.runUpdateConflictOnLWW(); + + logger.info("\r\nDELETE Conflict\r\n"); + this.runDeleteConflictOnLWW(); + } + + public void runUDPConflict() throws Exception { + logger.info("\r\nInsert Conflict\r\n"); + this.runInsertConflictOnUdp(); + + logger.info("\r\nUPDATE Conflict\r\n"); + this.runUpdateConflictOnUdp(); + + logger.info("\r\nDELETE Conflict\r\n"); + this.runDeleteConflictOnUdp(); + } + + public void runInsertConflictOnManual() throws Exception { + do { + logger.info("1) Performing conflicting insert across {} regions on {}", this.clients.size(), this.manualCollectionName); + + ArrayList> insertTask = new ArrayList<>(); + + Document conflictDocument = new Document(); + conflictDocument.setId(UUID.randomUUID().toString()); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + insertTask.add(this.tryInsertDocument(client, this.manualCollectionUri, conflictDocument, index++)); + } + + List conflictDocuments = Flux.merge(insertTask).collectList().subscribeOn(schedulerForBlockingWork).single().block(); + + if (conflictDocuments.size() == this.clients.size()) { + logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size()); + + for (Document conflictingInsert : conflictDocuments) { + this.validateManualConflict(this.clients, conflictingInsert); + } + break; + } else { + logger.info("Retrying insert to induce 
conflicts");
+            }
+        } while (true);
+    }
+
+    public void runUpdateConflictOnManual() throws Exception {
+        do {
+            Document conflictDocument = new Document();
+            conflictDocument.setId(UUID.randomUUID().toString());
+
+
+            conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0)
+                    .block();
+
+            TimeUnit.SECONDS.sleep(1);//1 Second for write to sync.
+
+
+            logger.info("1) Performing conflicting update across 3 regions on {}", this.manualCollectionName);
+
+            ArrayList> updateTask = new ArrayList<>();
+
+            int index = 0;
+            for (AsyncDocumentClient client : this.clients) {
+                updateTask.add(this.tryUpdateDocument(client, this.manualCollectionUri, conflictDocument, index++));
+            }
+
+            List conflictDocuments = Flux.merge(updateTask).collectList().single().block();
+
+            if (conflictDocuments.size() > 1) {
+                logger.info("2) Caused {} updated conflicts, verifying conflict resolution", conflictDocuments.size());
+
+                for (Document conflictingUpdate : conflictDocuments) {
+                    this.validateManualConflict(this.clients, conflictingUpdate);
+                }
+                break;
+            } else {
+                logger.info("Retrying update to induce conflicts");
+            }
+        } while (true);
+    }
+
+    public void runDeleteConflictOnManual() throws Exception {
+        do {
+            Document conflictDocument = new Document();
+            conflictDocument.setId(UUID.randomUUID().toString());
+
+            conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0)
+                    .block();
+
+            TimeUnit.SECONDS.sleep(10);//10 Seconds for write to sync.
+ + logger.info("1) Performing conflicting delete across 3 regions on {}", this.manualCollectionName); + + ArrayList> deleteTask = new ArrayList<>(); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + deleteTask.add(this.tryDeleteDocument(client, this.manualCollectionUri, conflictDocument, index++)); + } + + List conflictDocuments = Flux.merge(deleteTask).collectList() + .subscribeOn(schedulerForBlockingWork) + .single().block(); + + if (conflictDocuments.size() > 1) { + logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size()); + + for (Document conflictingDelete : conflictDocuments) { + this.validateManualConflict(this.clients, conflictingDelete); + } + + break; + } else { + logger.info("Retrying update to induce conflicts"); + } + } while (true); + } + + public void runInsertConflictOnLWW() throws Exception { + do { + logger.info("Performing conflicting insert across 3 regions"); + + ArrayList> insertTask = new ArrayList<>(); + + Document conflictDocument = new Document(); + conflictDocument.setId(UUID.randomUUID().toString()); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + insertTask.add(this.tryInsertDocument(client, this.lwwCollectionUri, conflictDocument, index++)); + } + + List conflictDocuments = Flux.merge(insertTask).collectList().single().block(); + + + if (conflictDocuments.size() > 1) { + logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size()); + + this.validateLWW(this.clients, conflictDocuments); + + break; + } else { + logger.info("Retrying insert to induce conflicts"); + } + } while (true); + } + + public void runUpdateConflictOnLWW() throws Exception { + do { + Document conflictDocument = new Document(); + conflictDocument.setId(UUID.randomUUID().toString()); + + conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0) + .block(); + + + TimeUnit.SECONDS.sleep(1); //1 Second 
for write to sync. + + logger.info("1) Performing conflicting update across {} regions on {}", this.clients.size(), this.lwwCollectionUri); + + ArrayList> insertTask = new ArrayList<>(); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++)); + } + + List conflictDocuments = Flux.merge(insertTask).collectList().single().block(); + + + if (conflictDocuments.size() > 1) { + logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size()); + + this.validateLWW(this.clients, conflictDocuments); + + break; + } else { + logger.info("Retrying insert to induce conflicts"); + } + } while (true); + } + + public void runDeleteConflictOnLWW() throws Exception { + do { + Document conflictDocument = new Document(); + conflictDocument.setId(UUID.randomUUID().toString()); + + conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0) + .block(); + + + TimeUnit.SECONDS.sleep(1); //1 Second for write to sync. + + logger.info("1) Performing conflicting delete across {} regions on {}", this.clients.size(), this.lwwCollectionUri); + + ArrayList> insertTask = new ArrayList<>(); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + if (index % 2 == 1) { + //We delete from region 1, even though region 2 always win. + insertTask.add(this.tryDeleteDocument(client, this.lwwCollectionUri, conflictDocument, index++)); + } else { + insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++)); + } + } + + List conflictDocuments = Flux.merge(insertTask).collectList().single().block(); + + if (conflictDocuments.size() > 1) { + logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size()); + + //DELETE should always win. irrespective of LWW. 
+ this.validateLWW(this.clients, conflictDocuments, true); + break; + } else { + logger.info("Retrying update/delete to induce conflicts"); + } + } while (true); + } + + public void runInsertConflictOnUdp() throws Exception { + do { + logger.info("1) Performing conflicting insert across 3 regions on {}", this.udpCollectionName); + + ArrayList> insertTask = new ArrayList<>(); + + Document conflictDocument = new Document(); + conflictDocument.setId(UUID.randomUUID().toString()); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + insertTask.add(this.tryInsertDocument(client, this.udpCollectionUri, conflictDocument, index++)); + } + + List conflictDocuments = Flux.merge(insertTask).collectList().single().block(); + + + if (conflictDocuments.size() > 1) { + logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size()); + + this.validateUDPAsync(this.clients, conflictDocuments); + + break; + } else { + logger.info("Retrying insert to induce conflicts"); + } + } while (true); + } + + public void runUpdateConflictOnUdp() throws Exception { + do { + Document conflictDocument = new Document(); + conflictDocument.setId(UUID.randomUUID().toString()); + + conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0) + .block(); + + TimeUnit.SECONDS.sleep(1); //1 Second for write to sync. 
+ + logger.info("1) Performing conflicting update across 3 regions on {}", this.udpCollectionUri); + + ArrayList> updateTask = new ArrayList<>(); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + updateTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++)); + } + + List conflictDocuments = Flux.merge(updateTask).collectList().single().block(); + + + if (conflictDocuments.size() > 1) { + logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size()); + + this.validateUDPAsync(this.clients, conflictDocuments); + + break; + } else { + logger.info("Retrying update to induce conflicts"); + } + } while (true); + } + + public void runDeleteConflictOnUdp() throws Exception { + do { + Document conflictDocument = new Document(); + conflictDocument.setId(UUID.randomUUID().toString()); + + conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0) + .block(); + + TimeUnit.SECONDS.sleep(1); //1 Second for write to sync. + + logger.info("1) Performing conflicting update/delete across 3 regions on {}", this.udpCollectionUri); + + ArrayList> deleteTask = new ArrayList<>(); + + int index = 0; + for (AsyncDocumentClient client : this.clients) { + if (index % 2 == 1) { + //We delete from region 1, even though region 2 always win. + deleteTask.add(this.tryDeleteDocument(client, this.udpCollectionUri, conflictDocument, index++)); + } else { + deleteTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++)); + } + } + + List conflictDocuments = Flux.merge(deleteTask).collectList().single().block(); + + if (conflictDocuments.size() > 1) { + logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size()); + + //DELETE should always win. irrespective of LWW. 
+ this.validateUDPAsync(this.clients, conflictDocuments, true); + break; + } else { + logger.info("Retrying update/delete to induce conflicts"); + } + } while (true); + } + + private Mono tryInsertDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) { + + logger.debug("region: {}", client.getWriteEndpoint()); + BridgeInternal.setProperty(document, "regionId", index); + BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint()); + return client.createDocument(collectionUri, document, null, false) + .onErrorResume(e -> { + if (hasDocumentClientException(e, 409)) { + return Mono.empty(); + } else { + return Mono.error(e); + } + }).map(ResourceResponse::getResource); + } + + private boolean hasDocumentClientException(Throwable e, int statusCode) { + if (e instanceof CosmosClientException) { + CosmosClientException dce = (CosmosClientException) e; + return dce.getStatusCode() == statusCode; + } + + return false; + } + + private boolean hasDocumentClientExceptionCause(Throwable e) { + while (e != null) { + if (e instanceof CosmosClientException) { + return true; + } + + e = e.getCause(); + } + return false; + } + + private boolean hasDocumentClientExceptionCause(Throwable e, int statusCode) { + while (e != null) { + if (e instanceof CosmosClientException) { + CosmosClientException dce = (CosmosClientException) e; + return dce.getStatusCode() == statusCode; + } + + e = e.getCause(); + } + + return false; + } + + private Mono tryUpdateDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) { + BridgeInternal.setProperty(document, "regionId", index); + BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint()); + + RequestOptions options = new RequestOptions(); + options.setAccessCondition(new AccessCondition()); + options.getAccessCondition().setType(AccessConditionType.IF_MATCH); + options.getAccessCondition().setCondition(document.getETag()); + + + return 
client.replaceDocument(document.getSelfLink(), document, null).onErrorResume(e -> { + + // pre condition failed + if (hasDocumentClientException(e, 412)) { + //Lost synchronously or not document yet. No conflict is induced. + return Mono.empty(); + + } + return Mono.error(e); + }).map(ResourceResponse::getResource); + } + + private Mono tryDeleteDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) { + BridgeInternal.setProperty(document, "regionId", index); + BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint()); + + RequestOptions options = new RequestOptions(); + options.setAccessCondition(new AccessCondition()); + options.getAccessCondition().setType(AccessConditionType.IF_MATCH); + options.getAccessCondition().setCondition(document.getETag()); + + + return client.deleteDocument(document.getSelfLink(), options).onErrorResume(e -> { + + // pre condition failed + if (hasDocumentClientException(e, 412)) { + //Lost synchronously. No conflict is induced. 
+ return Mono.empty(); + + } + return Mono.error(e); + }).map(rr -> document); + } + + private void validateManualConflict(List clients, Document conflictDocument) throws Exception { + boolean conflictExists = false; + for (AsyncDocumentClient client : clients) { + conflictExists = this.validateManualConflict(client, conflictDocument); + } + + if (conflictExists) { + this.deleteConflict(conflictDocument); + } + } + + private boolean isDelete(Conflict conflict) { + return StringUtils.equalsIgnoreCase(conflict.getOperationKind(), "delete"); + } + + + private boolean equals(String a, String b) { + return StringUtils.equals(a, b); + } + + private boolean validateManualConflict(AsyncDocumentClient client, Document conflictDocument) throws Exception { + while (true) { + FeedResponse response = client.readConflicts(this.manualCollectionUri, null) + .take(1).single().block(); + + for (Conflict conflict : response.getResults()) { + if (!isDelete(conflict)) { + Document conflictDocumentContent = conflict.getResource(Document.class); + if (equals(conflictDocument.getId(), conflictDocumentContent.getId())) { + if (equals(conflictDocument.getResourceId(), conflictDocumentContent.getResourceId()) && + equals(conflictDocument.getETag(), conflictDocumentContent.getETag())) { + logger.info("Document from Region {} lost conflict @ {}", + conflictDocument.getId(), + conflictDocument.getInt("regionId"), + client.getReadEndpoint()); + return true; + } else { + try { + //Checking whether this is the winner. 
+ Document winnerDocument = client.readDocument(conflictDocument.getSelfLink(), null) + .single().block().getResource(); + logger.info("Document from region {} won the conflict @ {}", + conflictDocument.getInt("regionId"), + client.getReadEndpoint()); + return false; + } + catch (Exception exception) { + if (hasDocumentClientException(exception, 404)) { + throw exception; + } else { + logger.info( + "Document from region {} not found @ {}", + conflictDocument.getInt("regionId"), + client.getReadEndpoint()); + } + } + } + } + } else { + if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) { + logger.info("DELETE conflict found @ {}", + client.getReadEndpoint()); + return false; + } + } + } + + logger.error("Document {} is not found in conflict feed @ {}, retrying", + conflictDocument.getId(), + client.getReadEndpoint()); + + TimeUnit.MILLISECONDS.sleep(500); + } + } + + private void deleteConflict(Document conflictDocument) { + AsyncDocumentClient delClient = clients.get(0); + + FeedResponse conflicts = delClient.readConflicts(this.manualCollectionUri, null).take(1).single().block(); + + for (Conflict conflict : conflicts.getResults()) { + if (!isDelete(conflict)) { + Document conflictContent = conflict.getResource(Document.class); + if (equals(conflictContent.getResourceId(), conflictDocument.getResourceId()) + && equals(conflictContent.getETag(), conflictDocument.getETag())) { + logger.info("Deleting manual conflict {} from region {}", + conflict.getSourceResourceId(), + conflictContent.getInt("regionId")); + delClient.deleteConflict(conflict.getSelfLink(), null) + .single().block(); + + } + } else if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) { + logger.info("Deleting manual conflict {} from region {}", + conflict.getSourceResourceId(), + conflictDocument.getInt("regionId")); + delClient.deleteConflict(conflict.getSelfLink(), null) + .single().block(); + } + } + } + + private void validateLWW(List clients, 
List conflictDocument) throws Exception { + validateLWW(clients, conflictDocument, false); + } + + + private void validateLWW(List clients, List conflictDocument, boolean hasDeleteConflict) throws Exception { + for (AsyncDocumentClient client : clients) { + this.validateLWW(client, conflictDocument, hasDeleteConflict); + } + } + + private void validateLWW(AsyncDocumentClient client, List conflictDocument, boolean hasDeleteConflict) throws Exception { + FeedResponse response = client.readConflicts(this.lwwCollectionUri, null) + .take(1).single().block(); + + if (response.getResults().size() != 0) { + logger.error("Found {} conflicts in the lww collection", response.getResults().size()); + return; + } + + if (hasDeleteConflict) { + do { + try { + client.readDocument(conflictDocument.get(0).getSelfLink(), null).single().block(); + + logger.error("DELETE conflict for document {} didnt win @ {}", + conflictDocument.get(0).getId(), + client.getReadEndpoint()); + + TimeUnit.MILLISECONDS.sleep(500); + } catch (Exception exception) { + if (!hasDocumentClientExceptionCause(exception)) { + throw exception; + } + + // NotFound + if (hasDocumentClientExceptionCause(exception, 404)) { + + logger.info("DELETE conflict won @ {}", client.getReadEndpoint()); + return; + } else { + logger.error("DELETE conflict for document {} didnt win @ {}", + conflictDocument.get(0).getId(), + client.getReadEndpoint()); + + TimeUnit.MILLISECONDS.sleep(500); + } + } + } while (true); + } + + Document winnerDocument = null; + + for (Document document : conflictDocument) { + if (winnerDocument == null || + winnerDocument.getInt("regionId") <= document.getInt("regionId")) { + winnerDocument = document; + } + } + + logger.info("Document from region {} should be the winner", + winnerDocument.getInt("regionId")); + + while (true) { + try { + Document existingDocument = client.readDocument(winnerDocument.getSelfLink(), null) + .single().block().getResource(); + + if (existingDocument.getInt("regionId") == 
winnerDocument.getInt("regionId")) { + logger.info("Winner document from region {} found at {}", + existingDocument.getInt("regionId"), + client.getReadEndpoint()); + break; + } else { + logger.error("Winning document version from region {} is not found @ {}, retrying...", + winnerDocument.getInt("regionId"), + client.getWriteEndpoint()); + TimeUnit.MILLISECONDS.sleep(500); + } + } catch (Exception e) { + logger.error("Winner document from region {} is not found @ {}, retrying...", + winnerDocument.getInt("regionId"), + client.getWriteEndpoint()); + TimeUnit.MILLISECONDS.sleep(500); + } + } + } + + private void validateUDPAsync(List clients, List conflictDocument) throws Exception { + validateUDPAsync(clients, conflictDocument, false); + } + + private void validateUDPAsync(List clients, List conflictDocument, boolean hasDeleteConflict) throws Exception { + for (AsyncDocumentClient client : clients) { + this.validateUDPAsync(client, conflictDocument, hasDeleteConflict); + } + } + + private String documentNameLink(String collectionId, String documentId) { + return String.format("dbs/%s/colls/%s/docs/%s", databaseName, collectionId, documentId); + } + + private void validateUDPAsync(AsyncDocumentClient client, List conflictDocument, boolean hasDeleteConflict) throws Exception { + FeedResponse response = client.readConflicts(this.udpCollectionUri, null).take(1).single().block(); + + if (response.getResults().size() != 0) { + logger.error("Found {} conflicts in the udp collection", response.getResults().size()); + return; + } + + if (hasDeleteConflict) { + do { + try { + client.readDocument( + documentNameLink(udpCollectionName, conflictDocument.get(0).getId()), null) + .single().block(); + + logger.error("DELETE conflict for document {} didnt win @ {}", + conflictDocument.get(0).getId(), + client.getReadEndpoint()); + + TimeUnit.MILLISECONDS.sleep(500); + + } catch (Exception exception) { + if (hasDocumentClientExceptionCause(exception, 404)) { + logger.info("DELETE 
conflict won @ {}", client.getReadEndpoint()); + return; + } else { + logger.error("DELETE conflict for document {} didnt win @ {}", + conflictDocument.get(0).getId(), + client.getReadEndpoint()); + + TimeUnit.MILLISECONDS.sleep(500); + } + } + } while (true); + } + + Document winnerDocument = null; + + for (Document document : conflictDocument) { + if (winnerDocument == null || + winnerDocument.getInt("regionId") <= document.getInt("regionId")) { + winnerDocument = document; + } + } + + logger.info("Document from region {} should be the winner", + winnerDocument.getInt("regionId")); + + while (true) { + try { + + Document existingDocument = client.readDocument( + documentNameLink(udpCollectionName, winnerDocument.getId()), null) + .single().block().getResource(); + + if (existingDocument.getInt("regionId") == winnerDocument.getInt( + ("regionId"))) { + logger.info("Winner document from region {} found at {}", + existingDocument.getInt("regionId"), + client.getReadEndpoint()); + break; + } else { + logger.error("Winning document version from region {} is not found @ {}, retrying...", + winnerDocument.getInt("regionId"), + client.getWriteEndpoint()); + TimeUnit.MILLISECONDS.sleep(500); + } + } catch (Exception e) { + logger.error("Winner document from region {} is not found @ {}, retrying...", + winnerDocument.getInt("regionId"), + client.getWriteEndpoint()); + TimeUnit.MILLISECONDS.sleep(500); + } + } + } + + public void shutdown() { + this.executor.shutdown(); + for(AsyncDocumentClient client: clients) { + client.close(); + } + } +} diff --git a/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Main.java b/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Main.java new file mode 100644 index 0000000..becfb12 --- /dev/null +++ b/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Main.java @@ -0,0 +1,59 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.rx.examples.multimaster.samples; + +import com.azure.cosmos.rx.examples.multimaster.ConfigurationManager; +import org.apache.commons.io.IOUtils; + +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; + + +public class Main { + public static void main(String[] args) throws Exception { + + if (args.length != 1) { + help(); + System.exit(1); + } + + try (InputStream inputStream = new FileInputStream(args[0])) { + ConfigurationManager.getAppSettings().load(inputStream); + System.out.println("Using file " + args[0] + " for the setting."); + } + + Main.runScenarios(); + } + + private static void runScenarios() throws Exception { + MultiMasterScenario scenario = new MultiMasterScenario(); + scenario.initialize(); + + scenario.runBasic(); + + scenario.runManualConflict(); + scenario.runLWW(); + scenario.runUDP(); + + System.out.println("Finished"); + + //shutting down the active the resources + scenario.shutdown(); + } + + private static void help() throws IOException { + System.out.println("Provide the path to setting file in the following format: "); + try (InputStream inputStream = + Main.class.getClassLoader() + .getResourceAsStream("multi-master-sample-config.properties")) { + + IOUtils.copy(inputStream, System.out); + + System.out.println(); + } catch (Exception e) { + throw e; + } + } +} diff --git a/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/MultiMasterScenario.java b/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/MultiMasterScenario.java new file mode 100644 index 0000000..c3ba36a --- /dev/null +++ b/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/MultiMasterScenario.java @@ -0,0 +1,146 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+
+package com.azure.cosmos.rx.examples.multimaster.samples;
+
+import com.azure.cosmos.implementation.AsyncDocumentClient;
+import com.azure.cosmos.ConnectionPolicy;
+import com.azure.cosmos.ConsistencyLevel;
+import com.azure.cosmos.rx.examples.multimaster.ConfigurationManager;
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import reactor.core.publisher.Mono;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+// Orchestrates the multi-master demo: one Worker per configured region for
+// basic insert/read/delete traffic, plus a shared ConflictWorker that induces
+// and validates write conflicts across regions.
+// NOTE(review): generic type parameters appear stripped by patch extraction
+// (e.g. "List workers") — confirm against the original source.
+public class MultiMasterScenario {
+
+    private final static Logger logger = LoggerFactory.getLogger(MultiMasterScenario.class);
+
+    final private String accountEndpoint;
+    final private String accountKey;
+    final private List workers;
+    final private ConflictWorker conflictWorker;
+
+    // Reads all settings from ConfigurationManager (loaded by Main), validates
+    // them, then builds one multi-write AsyncDocumentClient per region, each
+    // preferring its own region. Every client is registered with both a Worker
+    // and the shared ConflictWorker.
+    public MultiMasterScenario() {
+        this.accountEndpoint = ConfigurationManager.getAppSettings().getProperty("endpoint");
+        this.accountKey = ConfigurationManager.getAppSettings().getProperty("key");
+
+        String databaseName = ConfigurationManager.getAppSettings().getProperty("databaseName");
+        String manualCollectionName = ConfigurationManager.getAppSettings().getProperty("manualCollectionName");
+        String lwwCollectionName = ConfigurationManager.getAppSettings().getProperty("lwwCollectionName");
+        String udpCollectionName = ConfigurationManager.getAppSettings().getProperty("udpCollectionName");
+        String basicCollectionName = ConfigurationManager.getAppSettings().getProperty("basicCollectionName");
+        String regionsAsString = ConfigurationManager.getAppSettings().getProperty("regions");
+        Preconditions.checkNotNull(regionsAsString, "regions is required");
+        // Regions are supplied as a ';'-separated list, e.g. "North Central US;North Europe".
+        String[] regions = regionsAsString.split(";");
+        Preconditions.checkArgument(regions.length > 0, "at least one region is required");
+        Preconditions.checkNotNull(accountEndpoint, "accountEndpoint is required");
+        Preconditions.checkNotNull(accountKey, "accountKey is required");
+        Preconditions.checkNotNull(databaseName, "databaseName is required");
+        Preconditions.checkNotNull(manualCollectionName, "manualCollectionName is required");
+        Preconditions.checkNotNull(lwwCollectionName, "lwwCollectionName is required");
+        Preconditions.checkNotNull(udpCollectionName, "udpCollectionName is required");
+        Preconditions.checkNotNull(basicCollectionName, "basicCollectionName is required");
+
+        this.workers = new ArrayList<>();
+        this.conflictWorker = new ConflictWorker(databaseName, basicCollectionName, manualCollectionName, lwwCollectionName, udpCollectionName);
+
+        for (String region : regions) {
+            ConnectionPolicy policy = new ConnectionPolicy();
+            // Multi-master: allow writes in every region, read/write locally.
+            policy.setUsingMultipleWriteLocations(true);
+            policy.setPreferredLocations(Collections.singletonList(region));
+
+            // EVENTUAL consistency so conflicting regional writes can both commit.
+            AsyncDocumentClient client =
+                    new AsyncDocumentClient.Builder()
+                            .withMasterKeyOrResourceToken(this.accountKey)
+                            .withServiceEndpoint(this.accountEndpoint)
+                            .withConsistencyLevel(ConsistencyLevel.EVENTUAL)
+                            .withConnectionPolicy(policy).build();
+
+
+            workers.add(new Worker(client, databaseName, basicCollectionName));
+
+            conflictWorker.addClient(client);
+        }
+    }
+
+    // Creates the database and the four collections (basic, manual, LWW, UDP)
+    // plus the UDP resolver stored procedure. Blocks until done.
+    public void initialize() throws Exception {
+        this.conflictWorker.initialize();
+        logger.info("Initialized collections.");
+    }
+
+    // Active-active smoke test: every region inserts 100 documents, every
+    // region reads until it sees all of them, then one region deletes all.
+    // NOTE(review): runLoopAsync/readAllAsync do blocking work inside
+    // Mono.defer, so Mono.when here may effectively serialize on the
+    // subscribing thread — confirm the intended parallelism.
+    public void runBasic() throws Exception {
+        logger.info("\n####################################################");
+        logger.info("Basic Active-Active");
+        logger.info("####################################################");
+
+        logger.info("1) Starting insert loops across multiple regions ...");
+
+        List> basicTask = new ArrayList<>();
+
+        int documentsToInsertPerWorker = 100;
+
+        for (Worker worker : this.workers) {
+            basicTask.add(worker.runLoopAsync(documentsToInsertPerWorker));
+        }
+
+        Mono.when(basicTask).block();
+
+        basicTask.clear();
+
+        logger.info("2) Reading from every region ...");
+
+        // Each region must eventually observe every region's inserts.
+        int expectedDocuments = this.workers.size() * documentsToInsertPerWorker;
+        for (Worker worker : this.workers) {
+            basicTask.add(worker.readAllAsync(expectedDocuments));
+        }
+
+        Mono.when(basicTask).block();
+
+        basicTask.clear();
+
+        logger.info("3) Deleting all the documents ...");
+
+        // Deleting via a single region is enough; deletes replicate.
+        this.workers.get(0).deleteAll();
+
+        logger.info("####################################################");
+    }
+
+    // Induces insert/update/delete conflicts on the manual-resolution collection.
+    public void runManualConflict() throws Exception {
+        logger.info("\n####################################################");
+        logger.info("Manual Conflict Resolution");
+        logger.info("####################################################");
+
+        this.conflictWorker.runManualConflict();
+        logger.info("####################################################");
+    }
+
+    // Induces conflicts on the last-writer-wins collection.
+    public void runLWW() throws Exception {
+        logger.info("\n####################################################");
+        logger.info("LWW Conflict Resolution");
+        logger.info("####################################################");
+
+        this.conflictWorker.runLWWConflict();
+        logger.info("####################################################");
+    }
+
+    // Induces conflicts on the custom (stored-procedure) resolution collection.
+    public void runUDP() throws Exception {
+        logger.info("\n####################################################");
+        logger.info("UDP Conflict Resolution");
+        logger.info("####################################################");
+
+        this.conflictWorker.runUDPConflict();
+        logger.info("####################################################");
+    }
+
+    // Closes every client and shuts down each worker's executor.
+    public void shutdown() {
+        conflictWorker.shutdown();
+        for(Worker worker: this.workers) {
+            worker.shutdown();
+        }
+    }
+}
diff --git a/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Worker.java b/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Worker.java
new file mode 100644
index 0000000..4addb75
--- /dev/null
+++ b/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Worker.java
@@ -0,0 +1,166 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+ +package com.azure.cosmos.rx.examples.multimaster.samples; + + +import com.azure.cosmos.implementation.AsyncDocumentClient; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.implementation.Document; +import com.azure.cosmos.FeedOptions; +import com.azure.cosmos.FeedResponse; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Scheduler; +import reactor.core.scheduler.Schedulers; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +public class Worker { + private final static Logger logger = LoggerFactory.getLogger(Worker.class); + + private final AsyncDocumentClient client; + private final String documentCollectionUri; + + // scheduler for blocking work + private final Scheduler schedulerForBlockingWork; + private final ExecutorService executor; + + public Worker(AsyncDocumentClient client, String databaseName, String collectionName) { + this.client = client; + this.documentCollectionUri = String.format("/dbs/%s/colls/%s", databaseName, collectionName); + this.executor = Executors.newSingleThreadExecutor(); + this.schedulerForBlockingWork = Schedulers.fromExecutor(executor); + } + + public Mono runLoopAsync(int documentsToInsert) { + return Mono.defer(() -> { + + int iterationCount = 0; + + List latency = new ArrayList<>(); + while (iterationCount++ < documentsToInsert) { + long startTick = System.currentTimeMillis(); + + Document d = new Document(); + d.setId(UUID.randomUUID().toString()); + + this.client.createDocument(this.documentCollectionUri, d, null, false) + .subscribeOn(schedulerForBlockingWork).single().block(); + + long endTick = System.currentTimeMillis(); + + latency.add(endTick - startTick); + } + + Collections.sort(latency); + int p50Index = (latency.size() / 2); + + 
logger.info("Inserted {} documents at {} with p50 {} ms", + documentsToInsert, + this.client.getWriteEndpoint(), + latency.get(p50Index)); + + return Mono.empty(); + + }); + + } + + + public Mono readAllAsync(int expectedNumberOfDocuments) { + + return Mono.defer(() -> { + + while (true) { + int totalItemRead = 0; + FeedResponse response = null; + do { + + FeedOptions options = new FeedOptions(); + options.requestContinuation(response != null ? response.getContinuationToken() : null); + + response = this.client.readDocuments(this.documentCollectionUri, options).take(1) + .subscribeOn(schedulerForBlockingWork).single().block(); + + totalItemRead += response.getResults().size(); + } while (response.getContinuationToken() != null); + + if (totalItemRead < expectedNumberOfDocuments) { + logger.info("Total item read {} from {} is less than {}, retrying reads", + totalItemRead, + this.client.getReadEndpoint(), + expectedNumberOfDocuments); + + try { + TimeUnit.SECONDS.sleep(1); + } catch (InterruptedException e) { + logger.info("interrupted"); + break; + } + continue; + } else { + logger.info("READ {} items from {}", totalItemRead, this.client.getReadEndpoint()); + break; + } + } + + return Mono.empty(); + }); + } + + void deleteAll() { + List documents = new ArrayList<>(); + FeedResponse response = null; + do { + + FeedOptions options = new FeedOptions(); + options.requestContinuation(response != null ? 
response.getContinuationToken() : null); + + response = this.client.readDocuments(this.documentCollectionUri, options).take(1) + .subscribeOn(schedulerForBlockingWork).single().block(); + + documents.addAll(response.getResults()); + } while (response.getContinuationToken() != null); + + for (Document document : documents) { + try { + this.client.deleteDocument(document.getSelfLink(), null) + .subscribeOn(schedulerForBlockingWork).single().block(); + } catch (RuntimeException exEx) { + CosmosClientException dce = getDocumentClientExceptionCause(exEx); + + if (dce.getStatusCode() != 404) { + logger.info("Error occurred while deleting {} from {}", dce, client.getWriteEndpoint()); + } + } + } + + logger.info("Deleted all documents from region {}", this.client.getWriteEndpoint()); + } + + private CosmosClientException getDocumentClientExceptionCause(Throwable e) { + while (e != null) { + + if (e instanceof CosmosClientException) { + return (CosmosClientException) e; + } + + e = e.getCause(); + } + + return null; + } + + public void shutdown() { + executor.shutdown(); + client.close(); + } +} diff --git a/src/main/resources/log4j2.properties b/src/main/resources/log4j2.properties new file mode 100644 index 0000000..dd96ec9 --- /dev/null +++ b/src/main/resources/log4j2.properties @@ -0,0 +1,21 @@ +# this is the log4j configuration for tests + +# Set root logger level to WARN and its appender to STDOUT. +rootLogger.level = warn +rootLogger.appenderRef.stdout.ref = STDOUT + +logger.netty.name = io.netty +logger.netty.level = info + +logger.reactor.name = io.reactivex +logger.reactor.level = info + +logger.cosmos.name = com.azure.data.cosmos.rx.examples.multimaster +logger.cosmos.level = info + +# STDOUT is a ConsoleAppender and uses PatternLayout. 
+appender.console.name = STDOUT +appender.console.type = Console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %m%n + diff --git a/src/main/resources/multi-master-sample-config.properties b/src/main/resources/multi-master-sample-config.properties new file mode 100644 index 0000000..42c2030 --- /dev/null +++ b/src/main/resources/multi-master-sample-config.properties @@ -0,0 +1,8 @@ +endpoint= +key= +regions=North Central US;North Europe;Southeast Asia +databaseName=multiMasterDemoDB +manualCollectionName=myManualCollection +lwwCollectionName=myLwwCollection +udpCollectionName=myUdpCollection +basicCollectionName=myBasicCollection \ No newline at end of file diff --git a/src/main/resources/resolver-storedproc.txt b/src/main/resources/resolver-storedproc.txt new file mode 100644 index 0000000..e856721 --- /dev/null +++ b/src/main/resources/resolver-storedproc.txt @@ -0,0 +1,45 @@ +function resolver(incomingRecord, existingRecord, isTombstone, conflictingRecords) { + var collection = getContext().getCollection(); + if (!incomingRecord) { + if (existingRecord) { + collection.deleteDocument(existingRecord._self, {}, function(err, responseOptions) { + if (err) throw err; + }); + } + } else if (isTombstone) { + // delete always wins. 
+ } else { + var documentToUse = incomingRecord; + if (existingRecord) { + if (documentToUse.regionId < existingRecord.regionId) { + documentToUse = existingRecord; + } + } + var i; + for (i = 0; i < conflictingRecords.length; i++) { + if (documentToUse.regionId < conflictingRecords[i].regionId) { + documentToUse = conflictingRecords[i]; + } + } + tryDelete(conflictingRecords, incomingRecord, existingRecord, documentToUse); + } + function tryDelete(documents, incoming, existing, documentToInsert) { + if (documents.length > 0) { + collection.deleteDocument(documents[0]._self, {}, function(err, responseOptions) { + if (err) throw err; + documents.shift(); + tryDelete(documents, incoming, existing, documentToInsert); + }); + } else if (existing) { + collection.replaceDocument(existing._self, documentToInsert, + function(err, documentCreated) { + if (err) throw err; + }); + } else { + collection.createDocument(collection.getSelfLink(), documentToInsert, + function(err, documentCreated) { + if (err) throw err; + }); + } + } +} \ No newline at end of file From de6154d41407ed1015603083b43e9edacf1eacc7 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Wed, 4 Mar 2020 17:14:21 -0800 Subject: [PATCH 002/110] Updated .gitignore --- .gitignore | 40 +++++++++++++++++++ .idea/.gitignore | 2 + .idea/.name | 1 + .idea/azure-cosmos-java-sql-api-samples.iml | 9 +++++ .idea/codeStyles/Project.xml | 19 +++++++++ .idea/codeStyles/codeStyleConfig.xml | 5 +++ .idea/compiler.xml | 13 ++++++ .idea/encodings.xml | 7 ++++ ...azure_core_1_3_0_beta_1_dev_20200207_1.xml | 13 ++++++ ...com_azure_azure_cosmos_4_0_0_preview_2.xml | 13 ++++++ ...ackson_core_jackson_annotations_2_10_1.xml | 13 ++++++ ...erxml_jackson_core_jackson_core_2_10_1.xml | 13 ++++++ ...l_jackson_core_jackson_databind_2_10_1.xml | 13 ++++++ ...atatype_jackson_datatype_jsr310_2_10_1.xml | 13 ++++++ ...dule_jackson_module_afterburner_2_10_1.xml | 13 ++++++ ...sterxml_uuid_java_uuid_generator_3_1_5.xml | 13 ++++++ 
..._com_google_code_findbugs_jsr305_3_0_2.xml | 13 ++++++ ...rorprone_error_prone_annotations_2_2_0.xml | 13 ++++++ ...__com_google_guava_failureaccess_1_0_1.xml | 13 ++++++ ...ven__com_google_guava_guava_27_0_1_jre.xml | 13 ++++++ ...9_0_empty_to_avoid_conflict_with_guava.xml | 13 ++++++ ...m_google_j2objc_j2objc_annotations_1_1.xml | 13 ++++++ .../Maven__commons_io_commons_io_2_5.xml | 13 ++++++ ..._dropwizard_metrics_metrics_core_4_1_0.xml | 13 ++++++ ...n__io_micrometer_micrometer_core_1_2_0.xml | 13 ++++++ ...en__io_netty_netty_buffer_4_1_45_Final.xml | 13 ++++++ ...ven__io_netty_netty_codec_4_1_45_Final.xml | 13 ++++++ ...o_netty_netty_codec_http2_4_1_45_Final.xml | 13 ++++++ ...io_netty_netty_codec_http_4_1_45_Final.xml | 13 ++++++ ...o_netty_netty_codec_socks_4_1_45_Final.xml | 13 ++++++ ...en__io_netty_netty_common_4_1_45_Final.xml | 13 ++++++ ...n__io_netty_netty_handler_4_1_45_Final.xml | 13 ++++++ ...netty_netty_handler_proxy_4_1_45_Final.xml | 13 ++++++ ...__io_netty_netty_resolver_4_1_45_Final.xml | 13 ++++++ ..._io_netty_netty_transport_4_1_45_Final.xml | 13 ++++++ ...native_epoll_linux_x86_64_4_1_45_Final.xml | 13 ++++++ ...nsport_native_unix_common_4_1_45_Final.xml | 13 ++++++ ...ctor_netty_reactor_netty_0_9_4_RELEASE.xml | 13 ++++++ ...jectreactor_reactor_core_3_3_2_RELEASE.xml | 13 ++++++ ...pache_commons_commons_collections4_4_2.xml | 13 ++++++ ...org_apache_commons_commons_lang3_3_8_1.xml | 13 ++++++ ...n__org_apache_commons_commons_text_1_6.xml | 13 ++++++ ...rg_checkerframework_checker_qual_2_5_2.xml | 13 ++++++ ...s_mojo_animal_sniffer_annotations_1_17.xml | 13 ++++++ ...__org_hdrhistogram_HdrHistogram_2_1_11.xml | 13 ++++++ ...n__org_latencyutils_LatencyUtils_2_0_3.xml | 13 ++++++ ...reactivestreams_reactive_streams_1_0_3.xml | 13 ++++++ .../Maven__org_slf4j_slf4j_api_1_7_6.xml | 13 ++++++ .idea/misc.xml | 14 +++++++ .idea/modules.xml | 8 ++++ .idea/vcs.xml | 6 +++ 51 files changed, 644 insertions(+) create mode 100644 .idea/.gitignore 
create mode 100644 .idea/.name create mode 100644 .idea/azure-cosmos-java-sql-api-samples.iml create mode 100644 .idea/codeStyles/Project.xml create mode 100644 .idea/codeStyles/codeStyleConfig.xml create mode 100644 .idea/compiler.xml create mode 100644 .idea/encodings.xml create mode 100644 .idea/libraries/Maven__com_azure_azure_core_1_3_0_beta_1_dev_20200207_1.xml create mode 100644 .idea/libraries/Maven__com_azure_azure_cosmos_4_0_0_preview_2.xml create mode 100644 .idea/libraries/Maven__com_fasterxml_jackson_core_jackson_annotations_2_10_1.xml create mode 100644 .idea/libraries/Maven__com_fasterxml_jackson_core_jackson_core_2_10_1.xml create mode 100644 .idea/libraries/Maven__com_fasterxml_jackson_core_jackson_databind_2_10_1.xml create mode 100644 .idea/libraries/Maven__com_fasterxml_jackson_datatype_jackson_datatype_jsr310_2_10_1.xml create mode 100644 .idea/libraries/Maven__com_fasterxml_jackson_module_jackson_module_afterburner_2_10_1.xml create mode 100644 .idea/libraries/Maven__com_fasterxml_uuid_java_uuid_generator_3_1_5.xml create mode 100644 .idea/libraries/Maven__com_google_code_findbugs_jsr305_3_0_2.xml create mode 100644 .idea/libraries/Maven__com_google_errorprone_error_prone_annotations_2_2_0.xml create mode 100644 .idea/libraries/Maven__com_google_guava_failureaccess_1_0_1.xml create mode 100644 .idea/libraries/Maven__com_google_guava_guava_27_0_1_jre.xml create mode 100644 .idea/libraries/Maven__com_google_guava_listenablefuture_9999_0_empty_to_avoid_conflict_with_guava.xml create mode 100644 .idea/libraries/Maven__com_google_j2objc_j2objc_annotations_1_1.xml create mode 100644 .idea/libraries/Maven__commons_io_commons_io_2_5.xml create mode 100644 .idea/libraries/Maven__io_dropwizard_metrics_metrics_core_4_1_0.xml create mode 100644 .idea/libraries/Maven__io_micrometer_micrometer_core_1_2_0.xml create mode 100644 .idea/libraries/Maven__io_netty_netty_buffer_4_1_45_Final.xml create mode 100644 
.idea/libraries/Maven__io_netty_netty_codec_4_1_45_Final.xml create mode 100644 .idea/libraries/Maven__io_netty_netty_codec_http2_4_1_45_Final.xml create mode 100644 .idea/libraries/Maven__io_netty_netty_codec_http_4_1_45_Final.xml create mode 100644 .idea/libraries/Maven__io_netty_netty_codec_socks_4_1_45_Final.xml create mode 100644 .idea/libraries/Maven__io_netty_netty_common_4_1_45_Final.xml create mode 100644 .idea/libraries/Maven__io_netty_netty_handler_4_1_45_Final.xml create mode 100644 .idea/libraries/Maven__io_netty_netty_handler_proxy_4_1_45_Final.xml create mode 100644 .idea/libraries/Maven__io_netty_netty_resolver_4_1_45_Final.xml create mode 100644 .idea/libraries/Maven__io_netty_netty_transport_4_1_45_Final.xml create mode 100644 .idea/libraries/Maven__io_netty_netty_transport_native_epoll_linux_x86_64_4_1_45_Final.xml create mode 100644 .idea/libraries/Maven__io_netty_netty_transport_native_unix_common_4_1_45_Final.xml create mode 100644 .idea/libraries/Maven__io_projectreactor_netty_reactor_netty_0_9_4_RELEASE.xml create mode 100644 .idea/libraries/Maven__io_projectreactor_reactor_core_3_3_2_RELEASE.xml create mode 100644 .idea/libraries/Maven__org_apache_commons_commons_collections4_4_2.xml create mode 100644 .idea/libraries/Maven__org_apache_commons_commons_lang3_3_8_1.xml create mode 100644 .idea/libraries/Maven__org_apache_commons_commons_text_1_6.xml create mode 100644 .idea/libraries/Maven__org_checkerframework_checker_qual_2_5_2.xml create mode 100644 .idea/libraries/Maven__org_codehaus_mojo_animal_sniffer_annotations_1_17.xml create mode 100644 .idea/libraries/Maven__org_hdrhistogram_HdrHistogram_2_1_11.xml create mode 100644 .idea/libraries/Maven__org_latencyutils_LatencyUtils_2_0_3.xml create mode 100644 .idea/libraries/Maven__org_reactivestreams_reactive_streams_1_0_3.xml create mode 100644 .idea/libraries/Maven__org_slf4j_slf4j_api_1_7_6.xml create mode 100644 .idea/misc.xml create mode 100644 .idea/modules.xml create mode 100644 
.idea/vcs.xml diff --git a/.gitignore b/.gitignore index 0e13eeb..00a9c64 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,43 @@ buildNumber.properties .mvn/timing.properties # https://github.com/takari/maven-wrapper#usage-without-binary-jar .mvn/wrapper/maven-wrapper.jar + +# Compiled class file +*.class + +# Log file +*.log + +# BlueJ files +*.ctxt + +# idea files +/.idea/ + +# Mobile Tools for Java (J2ME) +.mtj.tmp/ + +# Package Files # +*.jar +*.war +*.nar +*.ear +*.zip +*.tar.gz +*.rar + +# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml +hs_err_pid* + +# Eclipse project file +*.project + +# Maven autogenerated classpath +*.classpath + +#VSCode Directories +/*.settings/ +/*.vscode/ +/target/ + +*.iml diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 0000000..e7e9d11 --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,2 @@ +# Default ignored files +/workspace.xml diff --git a/.idea/.name b/.idea/.name new file mode 100644 index 0000000..c486999 --- /dev/null +++ b/.idea/.name @@ -0,0 +1 @@ +Unknown \ No newline at end of file diff --git a/.idea/azure-cosmos-java-sql-api-samples.iml b/.idea/azure-cosmos-java-sql-api-samples.iml new file mode 100644 index 0000000..d6ebd48 --- /dev/null +++ b/.idea/azure-cosmos-java-sql-api-samples.iml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/.idea/codeStyles/Project.xml b/.idea/codeStyles/Project.xml new file mode 100644 index 0000000..0940777 --- /dev/null +++ b/.idea/codeStyles/Project.xml @@ -0,0 +1,19 @@ + + + + + + + + + \ No newline at end of file diff --git a/.idea/codeStyles/codeStyleConfig.xml b/.idea/codeStyles/codeStyleConfig.xml new file mode 100644 index 0000000..a55e7a1 --- /dev/null +++ b/.idea/codeStyles/codeStyleConfig.xml @@ -0,0 +1,5 @@ + + + + \ No newline at end of file diff --git a/.idea/compiler.xml b/.idea/compiler.xml new file mode 100644 index 0000000..1aae09c --- /dev/null +++ b/.idea/compiler.xml @@ -0,0 
+1,13 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/encodings.xml b/.idea/encodings.xml new file mode 100644 index 0000000..aa00ffa --- /dev/null +++ b/.idea/encodings.xml @@ -0,0 +1,7 @@ + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_azure_azure_core_1_3_0_beta_1_dev_20200207_1.xml b/.idea/libraries/Maven__com_azure_azure_core_1_3_0_beta_1_dev_20200207_1.xml new file mode 100644 index 0000000..f8894b4 --- /dev/null +++ b/.idea/libraries/Maven__com_azure_azure_core_1_3_0_beta_1_dev_20200207_1.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_azure_azure_cosmos_4_0_0_preview_2.xml b/.idea/libraries/Maven__com_azure_azure_cosmos_4_0_0_preview_2.xml new file mode 100644 index 0000000..81e8e57 --- /dev/null +++ b/.idea/libraries/Maven__com_azure_azure_cosmos_4_0_0_preview_2.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_annotations_2_10_1.xml b/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_annotations_2_10_1.xml new file mode 100644 index 0000000..81b7257 --- /dev/null +++ b/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_annotations_2_10_1.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_core_2_10_1.xml b/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_core_2_10_1.xml new file mode 100644 index 0000000..f1b25f9 --- /dev/null +++ b/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_core_2_10_1.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_databind_2_10_1.xml b/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_databind_2_10_1.xml new file mode 100644 index 0000000..bdf5b51 --- /dev/null +++ 
b/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_databind_2_10_1.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_fasterxml_jackson_datatype_jackson_datatype_jsr310_2_10_1.xml b/.idea/libraries/Maven__com_fasterxml_jackson_datatype_jackson_datatype_jsr310_2_10_1.xml new file mode 100644 index 0000000..07259be --- /dev/null +++ b/.idea/libraries/Maven__com_fasterxml_jackson_datatype_jackson_datatype_jsr310_2_10_1.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_fasterxml_jackson_module_jackson_module_afterburner_2_10_1.xml b/.idea/libraries/Maven__com_fasterxml_jackson_module_jackson_module_afterburner_2_10_1.xml new file mode 100644 index 0000000..9182d62 --- /dev/null +++ b/.idea/libraries/Maven__com_fasterxml_jackson_module_jackson_module_afterburner_2_10_1.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_fasterxml_uuid_java_uuid_generator_3_1_5.xml b/.idea/libraries/Maven__com_fasterxml_uuid_java_uuid_generator_3_1_5.xml new file mode 100644 index 0000000..a82aa72 --- /dev/null +++ b/.idea/libraries/Maven__com_fasterxml_uuid_java_uuid_generator_3_1_5.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_google_code_findbugs_jsr305_3_0_2.xml b/.idea/libraries/Maven__com_google_code_findbugs_jsr305_3_0_2.xml new file mode 100644 index 0000000..1c380d0 --- /dev/null +++ b/.idea/libraries/Maven__com_google_code_findbugs_jsr305_3_0_2.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_google_errorprone_error_prone_annotations_2_2_0.xml b/.idea/libraries/Maven__com_google_errorprone_error_prone_annotations_2_2_0.xml new file mode 100644 index 0000000..df0c40d --- /dev/null +++ 
b/.idea/libraries/Maven__com_google_errorprone_error_prone_annotations_2_2_0.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_google_guava_failureaccess_1_0_1.xml b/.idea/libraries/Maven__com_google_guava_failureaccess_1_0_1.xml new file mode 100644 index 0000000..36e948e --- /dev/null +++ b/.idea/libraries/Maven__com_google_guava_failureaccess_1_0_1.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_google_guava_guava_27_0_1_jre.xml b/.idea/libraries/Maven__com_google_guava_guava_27_0_1_jre.xml new file mode 100644 index 0000000..e631133 --- /dev/null +++ b/.idea/libraries/Maven__com_google_guava_guava_27_0_1_jre.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_google_guava_listenablefuture_9999_0_empty_to_avoid_conflict_with_guava.xml b/.idea/libraries/Maven__com_google_guava_listenablefuture_9999_0_empty_to_avoid_conflict_with_guava.xml new file mode 100644 index 0000000..4e15702 --- /dev/null +++ b/.idea/libraries/Maven__com_google_guava_listenablefuture_9999_0_empty_to_avoid_conflict_with_guava.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__com_google_j2objc_j2objc_annotations_1_1.xml b/.idea/libraries/Maven__com_google_j2objc_j2objc_annotations_1_1.xml new file mode 100644 index 0000000..c06f999 --- /dev/null +++ b/.idea/libraries/Maven__com_google_j2objc_j2objc_annotations_1_1.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__commons_io_commons_io_2_5.xml b/.idea/libraries/Maven__commons_io_commons_io_2_5.xml new file mode 100644 index 0000000..67c2ad2 --- /dev/null +++ b/.idea/libraries/Maven__commons_io_commons_io_2_5.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git 
a/.idea/libraries/Maven__io_dropwizard_metrics_metrics_core_4_1_0.xml b/.idea/libraries/Maven__io_dropwizard_metrics_metrics_core_4_1_0.xml new file mode 100644 index 0000000..a4702c2 --- /dev/null +++ b/.idea/libraries/Maven__io_dropwizard_metrics_metrics_core_4_1_0.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_micrometer_micrometer_core_1_2_0.xml b/.idea/libraries/Maven__io_micrometer_micrometer_core_1_2_0.xml new file mode 100644 index 0000000..10f1f86 --- /dev/null +++ b/.idea/libraries/Maven__io_micrometer_micrometer_core_1_2_0.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_buffer_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_buffer_4_1_45_Final.xml new file mode 100644 index 0000000..86e716b --- /dev/null +++ b/.idea/libraries/Maven__io_netty_netty_buffer_4_1_45_Final.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_codec_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_codec_4_1_45_Final.xml new file mode 100644 index 0000000..20585b5 --- /dev/null +++ b/.idea/libraries/Maven__io_netty_netty_codec_4_1_45_Final.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_codec_http2_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_codec_http2_4_1_45_Final.xml new file mode 100644 index 0000000..a80580a --- /dev/null +++ b/.idea/libraries/Maven__io_netty_netty_codec_http2_4_1_45_Final.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_codec_http_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_codec_http_4_1_45_Final.xml new file mode 100644 index 0000000..f6d6e45 --- /dev/null +++ b/.idea/libraries/Maven__io_netty_netty_codec_http_4_1_45_Final.xml @@ -0,0 +1,13 @@ + + 
+ + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_codec_socks_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_codec_socks_4_1_45_Final.xml new file mode 100644 index 0000000..18ab7b1 --- /dev/null +++ b/.idea/libraries/Maven__io_netty_netty_codec_socks_4_1_45_Final.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_common_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_common_4_1_45_Final.xml new file mode 100644 index 0000000..36e6da4 --- /dev/null +++ b/.idea/libraries/Maven__io_netty_netty_common_4_1_45_Final.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_handler_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_handler_4_1_45_Final.xml new file mode 100644 index 0000000..44a05a9 --- /dev/null +++ b/.idea/libraries/Maven__io_netty_netty_handler_4_1_45_Final.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_handler_proxy_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_handler_proxy_4_1_45_Final.xml new file mode 100644 index 0000000..d7e8065 --- /dev/null +++ b/.idea/libraries/Maven__io_netty_netty_handler_proxy_4_1_45_Final.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_resolver_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_resolver_4_1_45_Final.xml new file mode 100644 index 0000000..e1aed92 --- /dev/null +++ b/.idea/libraries/Maven__io_netty_netty_resolver_4_1_45_Final.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_transport_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_transport_4_1_45_Final.xml new file mode 100644 index 0000000..0d498f9 --- /dev/null +++ 
b/.idea/libraries/Maven__io_netty_netty_transport_4_1_45_Final.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_transport_native_epoll_linux_x86_64_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_transport_native_epoll_linux_x86_64_4_1_45_Final.xml new file mode 100644 index 0000000..eb9d94d --- /dev/null +++ b/.idea/libraries/Maven__io_netty_netty_transport_native_epoll_linux_x86_64_4_1_45_Final.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_transport_native_unix_common_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_transport_native_unix_common_4_1_45_Final.xml new file mode 100644 index 0000000..46db9da --- /dev/null +++ b/.idea/libraries/Maven__io_netty_netty_transport_native_unix_common_4_1_45_Final.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_projectreactor_netty_reactor_netty_0_9_4_RELEASE.xml b/.idea/libraries/Maven__io_projectreactor_netty_reactor_netty_0_9_4_RELEASE.xml new file mode 100644 index 0000000..3ad6d23 --- /dev/null +++ b/.idea/libraries/Maven__io_projectreactor_netty_reactor_netty_0_9_4_RELEASE.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__io_projectreactor_reactor_core_3_3_2_RELEASE.xml b/.idea/libraries/Maven__io_projectreactor_reactor_core_3_3_2_RELEASE.xml new file mode 100644 index 0000000..e0296e0 --- /dev/null +++ b/.idea/libraries/Maven__io_projectreactor_reactor_core_3_3_2_RELEASE.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__org_apache_commons_commons_collections4_4_2.xml b/.idea/libraries/Maven__org_apache_commons_commons_collections4_4_2.xml new file mode 100644 index 0000000..1779477 --- /dev/null +++ 
b/.idea/libraries/Maven__org_apache_commons_commons_collections4_4_2.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__org_apache_commons_commons_lang3_3_8_1.xml b/.idea/libraries/Maven__org_apache_commons_commons_lang3_3_8_1.xml new file mode 100644 index 0000000..33b78e9 --- /dev/null +++ b/.idea/libraries/Maven__org_apache_commons_commons_lang3_3_8_1.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__org_apache_commons_commons_text_1_6.xml b/.idea/libraries/Maven__org_apache_commons_commons_text_1_6.xml new file mode 100644 index 0000000..5a2fa29 --- /dev/null +++ b/.idea/libraries/Maven__org_apache_commons_commons_text_1_6.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__org_checkerframework_checker_qual_2_5_2.xml b/.idea/libraries/Maven__org_checkerframework_checker_qual_2_5_2.xml new file mode 100644 index 0000000..ad0d4fd --- /dev/null +++ b/.idea/libraries/Maven__org_checkerframework_checker_qual_2_5_2.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__org_codehaus_mojo_animal_sniffer_annotations_1_17.xml b/.idea/libraries/Maven__org_codehaus_mojo_animal_sniffer_annotations_1_17.xml new file mode 100644 index 0000000..5c3a057 --- /dev/null +++ b/.idea/libraries/Maven__org_codehaus_mojo_animal_sniffer_annotations_1_17.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__org_hdrhistogram_HdrHistogram_2_1_11.xml b/.idea/libraries/Maven__org_hdrhistogram_HdrHistogram_2_1_11.xml new file mode 100644 index 0000000..a1be137 --- /dev/null +++ b/.idea/libraries/Maven__org_hdrhistogram_HdrHistogram_2_1_11.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__org_latencyutils_LatencyUtils_2_0_3.xml 
b/.idea/libraries/Maven__org_latencyutils_LatencyUtils_2_0_3.xml new file mode 100644 index 0000000..bf68169 --- /dev/null +++ b/.idea/libraries/Maven__org_latencyutils_LatencyUtils_2_0_3.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__org_reactivestreams_reactive_streams_1_0_3.xml b/.idea/libraries/Maven__org_reactivestreams_reactive_streams_1_0_3.xml new file mode 100644 index 0000000..f17253b --- /dev/null +++ b/.idea/libraries/Maven__org_reactivestreams_reactive_streams_1_0_3.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/libraries/Maven__org_slf4j_slf4j_api_1_7_6.xml b/.idea/libraries/Maven__org_slf4j_slf4j_api_1_7_6.xml new file mode 100644 index 0000000..65280d3 --- /dev/null +++ b/.idea/libraries/Maven__org_slf4j_slf4j_api_1_7_6.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 0000000..01b1034 --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,14 @@ + + + + + + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 0000000..bf36d5d --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 0000000..94a25f7 --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file From b140ac480e366fe15840a5e6adfe31d000c63b9d Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Wed, 4 Mar 2020 17:15:16 -0800 Subject: [PATCH 003/110] Modified .gitignore --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 00a9c64..ab65e5c 100644 --- a/.gitignore +++ b/.gitignore @@ -20,7 +20,7 @@ buildNumber.properties *.ctxt # idea files -/.idea/ +.idea/ # Mobile Tools for Java (J2ME) .mtj.tmp/ From 87579f26e1f716c59a90ad7e16f4d309c2c33ef4 
Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Wed, 4 Mar 2020 17:16:05 -0800 Subject: [PATCH 004/110] Modified .gitignore --- .gitignore | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/.gitignore b/.gitignore index ab65e5c..9b01c32 100644 --- a/.gitignore +++ b/.gitignore @@ -1,15 +1,3 @@ -target/ -pom.xml.tag -pom.xml.releaseBackup -pom.xml.versionsBackup -pom.xml.next -release.properties -dependency-reduced-pom.xml -buildNumber.properties -.mvn/timing.properties -# https://github.com/takari/maven-wrapper#usage-without-binary-jar -.mvn/wrapper/maven-wrapper.jar - # Compiled class file *.class @@ -20,7 +8,7 @@ buildNumber.properties *.ctxt # idea files -.idea/ +/.idea/ # Mobile Tools for Java (J2ME) .mtj.tmp/ From 17e14e96fd49a818fcc5bd2a8b5a709f117233ba Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Wed, 4 Mar 2020 17:17:03 -0800 Subject: [PATCH 005/110] Updated .gitignore --- .gitignore | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.gitignore b/.gitignore index 9b01c32..00a9c64 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,15 @@ +target/ +pom.xml.tag +pom.xml.releaseBackup +pom.xml.versionsBackup +pom.xml.next +release.properties +dependency-reduced-pom.xml +buildNumber.properties +.mvn/timing.properties +# https://github.com/takari/maven-wrapper#usage-without-binary-jar +.mvn/wrapper/maven-wrapper.jar + # Compiled class file *.class From 41c73492a2e1894661b875327a2de703f3b40a6c Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Wed, 4 Mar 2020 17:27:31 -0800 Subject: [PATCH 006/110] pom.xml changes --- .idea/azure-cosmos-java-sql-api-samples.iml | 53 +++++++++++++++-- .idea/compiler.xml | 10 +++- pom.xml | 65 +++++++++++++++++++++ 3 files changed, 123 insertions(+), 5 deletions(-) create mode 100644 pom.xml diff --git a/.idea/azure-cosmos-java-sql-api-samples.iml b/.idea/azure-cosmos-java-sql-api-samples.iml index d6ebd48..47b3dd5 100644 --- a/.idea/azure-cosmos-java-sql-api-samples.iml +++ 
b/.idea/azure-cosmos-java-sql-api-samples.iml @@ -1,9 +1,54 @@ - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/compiler.xml b/.idea/compiler.xml index 1aae09c..c4306b9 100644 --- a/.idea/compiler.xml +++ b/.idea/compiler.xml @@ -1,13 +1,21 @@ + + + + + + + + \ No newline at end of file diff --git a/pom.xml b/pom.xml new file mode 100644 index 0000000..2a619b8 --- /dev/null +++ b/pom.xml @@ -0,0 +1,65 @@ + + + 4.0.0 + + com.azure + azure-cosmos-java-sql-api-samples + 1.0-SNAPSHOT + Get Started With Sync / Async Java SDK for SQL API of Azure Cosmos DB Database Service + + UTF-8 + + + + + maven-compiler-plugin + 3.1 + + 1.8 + 1.8 + + + + org.codehaus.mojo + exec-maven-plugin + 1.6.0 + + + sync + + com.azure.cosmos.sample.sync.SyncMain + + + + async + + com.azure.cosmos.sample.async.AsyncMain + + + + + + org.apache.maven.plugins + maven-eclipse-plugin + 2.8 + + + + org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8 + + + + + + + + + com.azure + azure-cosmos + 4.0.0-preview.2 + + + + \ No newline at end of file From 708c0b6ca410de789453c4902be54c96f706d375 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Wed, 4 Mar 2020 17:43:07 -0800 Subject: [PATCH 007/110] Working pom.xml --- .idea/azure-cosmos-java-sql-api-samples.iml | 23 +++++++--------- ...azure_core_1_3_0_beta_1_dev_20200207_1.xml | 13 --------- ...com_azure_azure_cosmos_4_0_0_preview_2.xml | 13 --------- ...ackson_core_jackson_annotations_2_10_1.xml | 13 --------- ...erxml_jackson_core_jackson_core_2_10_1.xml | 13 --------- ...l_jackson_core_jackson_databind_2_10_1.xml | 13 --------- ...atatype_jackson_datatype_jsr310_2_10_1.xml | 13 --------- ...dule_jackson_module_afterburner_2_10_1.xml | 13 --------- ...sterxml_uuid_java_uuid_generator_3_1_5.xml | 13 --------- ..._com_google_code_findbugs_jsr305_3_0_2.xml | 13 --------- 
...rorprone_error_prone_annotations_2_2_0.xml | 13 --------- ...__com_google_guava_failureaccess_1_0_1.xml | 13 --------- ...ven__com_google_guava_guava_27_0_1_jre.xml | 13 --------- ...9_0_empty_to_avoid_conflict_with_guava.xml | 13 --------- ...m_google_j2objc_j2objc_annotations_1_1.xml | 13 --------- .../Maven__commons_io_commons_io_2_5.xml | 13 --------- ..._dropwizard_metrics_metrics_core_4_1_0.xml | 13 --------- ...n__io_micrometer_micrometer_core_1_2_0.xml | 13 --------- ...en__io_netty_netty_buffer_4_1_45_Final.xml | 13 --------- ...ven__io_netty_netty_codec_4_1_45_Final.xml | 13 --------- ...o_netty_netty_codec_http2_4_1_45_Final.xml | 13 --------- ...io_netty_netty_codec_http_4_1_45_Final.xml | 13 --------- ...o_netty_netty_codec_socks_4_1_45_Final.xml | 13 --------- ...en__io_netty_netty_common_4_1_45_Final.xml | 13 --------- ...n__io_netty_netty_handler_4_1_45_Final.xml | 13 --------- ...netty_netty_handler_proxy_4_1_45_Final.xml | 13 --------- ...__io_netty_netty_resolver_4_1_45_Final.xml | 13 --------- ..._io_netty_netty_transport_4_1_45_Final.xml | 13 --------- ...native_epoll_linux_x86_64_4_1_45_Final.xml | 13 --------- ...nsport_native_unix_common_4_1_45_Final.xml | 13 --------- ...ctor_netty_reactor_netty_0_9_4_RELEASE.xml | 13 --------- ...jectreactor_reactor_core_3_3_2_RELEASE.xml | 13 --------- ...pache_commons_commons_collections4_4_2.xml | 13 --------- ...org_apache_commons_commons_lang3_3_8_1.xml | 13 --------- ...n__org_apache_commons_commons_text_1_6.xml | 13 --------- ...rg_checkerframework_checker_qual_2_5_2.xml | 13 --------- ...s_mojo_animal_sniffer_annotations_1_17.xml | 13 --------- ...__org_hdrhistogram_HdrHistogram_2_1_11.xml | 13 --------- ...n__org_latencyutils_LatencyUtils_2_0_3.xml | 13 --------- ...reactivestreams_reactive_streams_1_0_3.xml | 13 --------- .../Maven__org_slf4j_slf4j_api_1_7_6.xml | 13 --------- pom.xml | 27 ++++++++++++++++++- 42 files changed, 36 insertions(+), 534 deletions(-) delete mode 100644 
.idea/libraries/Maven__com_azure_azure_core_1_3_0_beta_1_dev_20200207_1.xml delete mode 100644 .idea/libraries/Maven__com_azure_azure_cosmos_4_0_0_preview_2.xml delete mode 100644 .idea/libraries/Maven__com_fasterxml_jackson_core_jackson_annotations_2_10_1.xml delete mode 100644 .idea/libraries/Maven__com_fasterxml_jackson_core_jackson_core_2_10_1.xml delete mode 100644 .idea/libraries/Maven__com_fasterxml_jackson_core_jackson_databind_2_10_1.xml delete mode 100644 .idea/libraries/Maven__com_fasterxml_jackson_datatype_jackson_datatype_jsr310_2_10_1.xml delete mode 100644 .idea/libraries/Maven__com_fasterxml_jackson_module_jackson_module_afterburner_2_10_1.xml delete mode 100644 .idea/libraries/Maven__com_fasterxml_uuid_java_uuid_generator_3_1_5.xml delete mode 100644 .idea/libraries/Maven__com_google_code_findbugs_jsr305_3_0_2.xml delete mode 100644 .idea/libraries/Maven__com_google_errorprone_error_prone_annotations_2_2_0.xml delete mode 100644 .idea/libraries/Maven__com_google_guava_failureaccess_1_0_1.xml delete mode 100644 .idea/libraries/Maven__com_google_guava_guava_27_0_1_jre.xml delete mode 100644 .idea/libraries/Maven__com_google_guava_listenablefuture_9999_0_empty_to_avoid_conflict_with_guava.xml delete mode 100644 .idea/libraries/Maven__com_google_j2objc_j2objc_annotations_1_1.xml delete mode 100644 .idea/libraries/Maven__commons_io_commons_io_2_5.xml delete mode 100644 .idea/libraries/Maven__io_dropwizard_metrics_metrics_core_4_1_0.xml delete mode 100644 .idea/libraries/Maven__io_micrometer_micrometer_core_1_2_0.xml delete mode 100644 .idea/libraries/Maven__io_netty_netty_buffer_4_1_45_Final.xml delete mode 100644 .idea/libraries/Maven__io_netty_netty_codec_4_1_45_Final.xml delete mode 100644 .idea/libraries/Maven__io_netty_netty_codec_http2_4_1_45_Final.xml delete mode 100644 .idea/libraries/Maven__io_netty_netty_codec_http_4_1_45_Final.xml delete mode 100644 .idea/libraries/Maven__io_netty_netty_codec_socks_4_1_45_Final.xml delete mode 100644 
.idea/libraries/Maven__io_netty_netty_common_4_1_45_Final.xml delete mode 100644 .idea/libraries/Maven__io_netty_netty_handler_4_1_45_Final.xml delete mode 100644 .idea/libraries/Maven__io_netty_netty_handler_proxy_4_1_45_Final.xml delete mode 100644 .idea/libraries/Maven__io_netty_netty_resolver_4_1_45_Final.xml delete mode 100644 .idea/libraries/Maven__io_netty_netty_transport_4_1_45_Final.xml delete mode 100644 .idea/libraries/Maven__io_netty_netty_transport_native_epoll_linux_x86_64_4_1_45_Final.xml delete mode 100644 .idea/libraries/Maven__io_netty_netty_transport_native_unix_common_4_1_45_Final.xml delete mode 100644 .idea/libraries/Maven__io_projectreactor_netty_reactor_netty_0_9_4_RELEASE.xml delete mode 100644 .idea/libraries/Maven__io_projectreactor_reactor_core_3_3_2_RELEASE.xml delete mode 100644 .idea/libraries/Maven__org_apache_commons_commons_collections4_4_2.xml delete mode 100644 .idea/libraries/Maven__org_apache_commons_commons_lang3_3_8_1.xml delete mode 100644 .idea/libraries/Maven__org_apache_commons_commons_text_1_6.xml delete mode 100644 .idea/libraries/Maven__org_checkerframework_checker_qual_2_5_2.xml delete mode 100644 .idea/libraries/Maven__org_codehaus_mojo_animal_sniffer_annotations_1_17.xml delete mode 100644 .idea/libraries/Maven__org_hdrhistogram_HdrHistogram_2_1_11.xml delete mode 100644 .idea/libraries/Maven__org_latencyutils_LatencyUtils_2_0_3.xml delete mode 100644 .idea/libraries/Maven__org_reactivestreams_reactive_streams_1_0_3.xml delete mode 100644 .idea/libraries/Maven__org_slf4j_slf4j_api_1_7_6.xml diff --git a/.idea/azure-cosmos-java-sql-api-samples.iml b/.idea/azure-cosmos-java-sql-api-samples.iml index 47b3dd5..cc3de2b 100644 --- a/.idea/azure-cosmos-java-sql-api-samples.iml +++ b/.idea/azure-cosmos-java-sql-api-samples.iml @@ -10,7 +10,7 @@ - + @@ -19,32 +19,29 @@ + + - - - - - + + - - - - - - + + + + - + diff --git a/.idea/libraries/Maven__com_azure_azure_core_1_3_0_beta_1_dev_20200207_1.xml 
b/.idea/libraries/Maven__com_azure_azure_core_1_3_0_beta_1_dev_20200207_1.xml deleted file mode 100644 index f8894b4..0000000 --- a/.idea/libraries/Maven__com_azure_azure_core_1_3_0_beta_1_dev_20200207_1.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__com_azure_azure_cosmos_4_0_0_preview_2.xml b/.idea/libraries/Maven__com_azure_azure_cosmos_4_0_0_preview_2.xml deleted file mode 100644 index 81e8e57..0000000 --- a/.idea/libraries/Maven__com_azure_azure_cosmos_4_0_0_preview_2.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_annotations_2_10_1.xml b/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_annotations_2_10_1.xml deleted file mode 100644 index 81b7257..0000000 --- a/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_annotations_2_10_1.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_core_2_10_1.xml b/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_core_2_10_1.xml deleted file mode 100644 index f1b25f9..0000000 --- a/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_core_2_10_1.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_databind_2_10_1.xml b/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_databind_2_10_1.xml deleted file mode 100644 index bdf5b51..0000000 --- a/.idea/libraries/Maven__com_fasterxml_jackson_core_jackson_databind_2_10_1.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__com_fasterxml_jackson_datatype_jackson_datatype_jsr310_2_10_1.xml b/.idea/libraries/Maven__com_fasterxml_jackson_datatype_jackson_datatype_jsr310_2_10_1.xml 
deleted file mode 100644 index 07259be..0000000 --- a/.idea/libraries/Maven__com_fasterxml_jackson_datatype_jackson_datatype_jsr310_2_10_1.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__com_fasterxml_jackson_module_jackson_module_afterburner_2_10_1.xml b/.idea/libraries/Maven__com_fasterxml_jackson_module_jackson_module_afterburner_2_10_1.xml deleted file mode 100644 index 9182d62..0000000 --- a/.idea/libraries/Maven__com_fasterxml_jackson_module_jackson_module_afterburner_2_10_1.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__com_fasterxml_uuid_java_uuid_generator_3_1_5.xml b/.idea/libraries/Maven__com_fasterxml_uuid_java_uuid_generator_3_1_5.xml deleted file mode 100644 index a82aa72..0000000 --- a/.idea/libraries/Maven__com_fasterxml_uuid_java_uuid_generator_3_1_5.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__com_google_code_findbugs_jsr305_3_0_2.xml b/.idea/libraries/Maven__com_google_code_findbugs_jsr305_3_0_2.xml deleted file mode 100644 index 1c380d0..0000000 --- a/.idea/libraries/Maven__com_google_code_findbugs_jsr305_3_0_2.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__com_google_errorprone_error_prone_annotations_2_2_0.xml b/.idea/libraries/Maven__com_google_errorprone_error_prone_annotations_2_2_0.xml deleted file mode 100644 index df0c40d..0000000 --- a/.idea/libraries/Maven__com_google_errorprone_error_prone_annotations_2_2_0.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__com_google_guava_failureaccess_1_0_1.xml b/.idea/libraries/Maven__com_google_guava_failureaccess_1_0_1.xml deleted file mode 100644 index 36e948e..0000000 --- 
a/.idea/libraries/Maven__com_google_guava_failureaccess_1_0_1.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__com_google_guava_guava_27_0_1_jre.xml b/.idea/libraries/Maven__com_google_guava_guava_27_0_1_jre.xml deleted file mode 100644 index e631133..0000000 --- a/.idea/libraries/Maven__com_google_guava_guava_27_0_1_jre.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__com_google_guava_listenablefuture_9999_0_empty_to_avoid_conflict_with_guava.xml b/.idea/libraries/Maven__com_google_guava_listenablefuture_9999_0_empty_to_avoid_conflict_with_guava.xml deleted file mode 100644 index 4e15702..0000000 --- a/.idea/libraries/Maven__com_google_guava_listenablefuture_9999_0_empty_to_avoid_conflict_with_guava.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__com_google_j2objc_j2objc_annotations_1_1.xml b/.idea/libraries/Maven__com_google_j2objc_j2objc_annotations_1_1.xml deleted file mode 100644 index c06f999..0000000 --- a/.idea/libraries/Maven__com_google_j2objc_j2objc_annotations_1_1.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__commons_io_commons_io_2_5.xml b/.idea/libraries/Maven__commons_io_commons_io_2_5.xml deleted file mode 100644 index 67c2ad2..0000000 --- a/.idea/libraries/Maven__commons_io_commons_io_2_5.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_dropwizard_metrics_metrics_core_4_1_0.xml b/.idea/libraries/Maven__io_dropwizard_metrics_metrics_core_4_1_0.xml deleted file mode 100644 index a4702c2..0000000 --- a/.idea/libraries/Maven__io_dropwizard_metrics_metrics_core_4_1_0.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git 
a/.idea/libraries/Maven__io_micrometer_micrometer_core_1_2_0.xml b/.idea/libraries/Maven__io_micrometer_micrometer_core_1_2_0.xml deleted file mode 100644 index 10f1f86..0000000 --- a/.idea/libraries/Maven__io_micrometer_micrometer_core_1_2_0.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_buffer_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_buffer_4_1_45_Final.xml deleted file mode 100644 index 86e716b..0000000 --- a/.idea/libraries/Maven__io_netty_netty_buffer_4_1_45_Final.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_codec_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_codec_4_1_45_Final.xml deleted file mode 100644 index 20585b5..0000000 --- a/.idea/libraries/Maven__io_netty_netty_codec_4_1_45_Final.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_codec_http2_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_codec_http2_4_1_45_Final.xml deleted file mode 100644 index a80580a..0000000 --- a/.idea/libraries/Maven__io_netty_netty_codec_http2_4_1_45_Final.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_codec_http_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_codec_http_4_1_45_Final.xml deleted file mode 100644 index f6d6e45..0000000 --- a/.idea/libraries/Maven__io_netty_netty_codec_http_4_1_45_Final.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_codec_socks_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_codec_socks_4_1_45_Final.xml deleted file mode 100644 index 18ab7b1..0000000 --- a/.idea/libraries/Maven__io_netty_netty_codec_socks_4_1_45_Final.xml +++ /dev/null 
@@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_common_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_common_4_1_45_Final.xml deleted file mode 100644 index 36e6da4..0000000 --- a/.idea/libraries/Maven__io_netty_netty_common_4_1_45_Final.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_handler_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_handler_4_1_45_Final.xml deleted file mode 100644 index 44a05a9..0000000 --- a/.idea/libraries/Maven__io_netty_netty_handler_4_1_45_Final.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_handler_proxy_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_handler_proxy_4_1_45_Final.xml deleted file mode 100644 index d7e8065..0000000 --- a/.idea/libraries/Maven__io_netty_netty_handler_proxy_4_1_45_Final.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_resolver_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_resolver_4_1_45_Final.xml deleted file mode 100644 index e1aed92..0000000 --- a/.idea/libraries/Maven__io_netty_netty_resolver_4_1_45_Final.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_transport_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_transport_4_1_45_Final.xml deleted file mode 100644 index 0d498f9..0000000 --- a/.idea/libraries/Maven__io_netty_netty_transport_4_1_45_Final.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_transport_native_epoll_linux_x86_64_4_1_45_Final.xml 
b/.idea/libraries/Maven__io_netty_netty_transport_native_epoll_linux_x86_64_4_1_45_Final.xml deleted file mode 100644 index eb9d94d..0000000 --- a/.idea/libraries/Maven__io_netty_netty_transport_native_epoll_linux_x86_64_4_1_45_Final.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_netty_netty_transport_native_unix_common_4_1_45_Final.xml b/.idea/libraries/Maven__io_netty_netty_transport_native_unix_common_4_1_45_Final.xml deleted file mode 100644 index 46db9da..0000000 --- a/.idea/libraries/Maven__io_netty_netty_transport_native_unix_common_4_1_45_Final.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_projectreactor_netty_reactor_netty_0_9_4_RELEASE.xml b/.idea/libraries/Maven__io_projectreactor_netty_reactor_netty_0_9_4_RELEASE.xml deleted file mode 100644 index 3ad6d23..0000000 --- a/.idea/libraries/Maven__io_projectreactor_netty_reactor_netty_0_9_4_RELEASE.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__io_projectreactor_reactor_core_3_3_2_RELEASE.xml b/.idea/libraries/Maven__io_projectreactor_reactor_core_3_3_2_RELEASE.xml deleted file mode 100644 index e0296e0..0000000 --- a/.idea/libraries/Maven__io_projectreactor_reactor_core_3_3_2_RELEASE.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__org_apache_commons_commons_collections4_4_2.xml b/.idea/libraries/Maven__org_apache_commons_commons_collections4_4_2.xml deleted file mode 100644 index 1779477..0000000 --- a/.idea/libraries/Maven__org_apache_commons_commons_collections4_4_2.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__org_apache_commons_commons_lang3_3_8_1.xml 
b/.idea/libraries/Maven__org_apache_commons_commons_lang3_3_8_1.xml deleted file mode 100644 index 33b78e9..0000000 --- a/.idea/libraries/Maven__org_apache_commons_commons_lang3_3_8_1.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__org_apache_commons_commons_text_1_6.xml b/.idea/libraries/Maven__org_apache_commons_commons_text_1_6.xml deleted file mode 100644 index 5a2fa29..0000000 --- a/.idea/libraries/Maven__org_apache_commons_commons_text_1_6.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__org_checkerframework_checker_qual_2_5_2.xml b/.idea/libraries/Maven__org_checkerframework_checker_qual_2_5_2.xml deleted file mode 100644 index ad0d4fd..0000000 --- a/.idea/libraries/Maven__org_checkerframework_checker_qual_2_5_2.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__org_codehaus_mojo_animal_sniffer_annotations_1_17.xml b/.idea/libraries/Maven__org_codehaus_mojo_animal_sniffer_annotations_1_17.xml deleted file mode 100644 index 5c3a057..0000000 --- a/.idea/libraries/Maven__org_codehaus_mojo_animal_sniffer_annotations_1_17.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__org_hdrhistogram_HdrHistogram_2_1_11.xml b/.idea/libraries/Maven__org_hdrhistogram_HdrHistogram_2_1_11.xml deleted file mode 100644 index a1be137..0000000 --- a/.idea/libraries/Maven__org_hdrhistogram_HdrHistogram_2_1_11.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__org_latencyutils_LatencyUtils_2_0_3.xml b/.idea/libraries/Maven__org_latencyutils_LatencyUtils_2_0_3.xml deleted file mode 100644 index bf68169..0000000 --- a/.idea/libraries/Maven__org_latencyutils_LatencyUtils_2_0_3.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - 
- - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__org_reactivestreams_reactive_streams_1_0_3.xml b/.idea/libraries/Maven__org_reactivestreams_reactive_streams_1_0_3.xml deleted file mode 100644 index f17253b..0000000 --- a/.idea/libraries/Maven__org_reactivestreams_reactive_streams_1_0_3.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/Maven__org_slf4j_slf4j_api_1_7_6.xml b/.idea/libraries/Maven__org_slf4j_slf4j_api_1_7_6.xml deleted file mode 100644 index 65280d3..0000000 --- a/.idea/libraries/Maven__org_slf4j_slf4j_api_1_7_6.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/pom.xml b/pom.xml index 2a619b8..475bf54 100644 --- a/pom.xml +++ b/pom.xml @@ -11,6 +11,25 @@ UTF-8 + + + azure-sdk-for-java + https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-java/maven/v1 + + + + + azure-sdk-for-java + https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-java/maven/v1 + + true + + + true + + + + @@ -58,7 +77,13 @@ com.azure azure-cosmos - 4.0.0-preview.2 + 4.0.1-beta.1.dev.20200228.2 + + + com.azure + azure-core + + From ee1bcc26a4b10bdbad8c9ba62897be8d084bb854 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Wed, 4 Mar 2020 18:08:28 -0800 Subject: [PATCH 008/110] Example code will work (once some dependency issues are resolved.) 
--- .../cosmos/examples/AccountSettings.java | 38 - .../com/azure/cosmos/examples/BasicDemo.java | 204 ----- .../azure/cosmos/examples/HelloWorldDemo.java | 105 --- .../changefeed/SampleChangeFeedProcessor.java | 34 +- .../multimaster/ConfigurationManager.java | 12 - .../rx/examples/multimaster/Helpers.java | 84 -- .../multimaster/samples/ConflictWorker.java | 858 ------------------ .../rx/examples/multimaster/samples/Main.java | 59 -- .../samples/MultiMasterScenario.java | 146 --- .../examples/multimaster/samples/Worker.java | 166 ---- 10 files changed, 1 insertion(+), 1705 deletions(-) delete mode 100644 src/main/java/com/azure/cosmos/examples/AccountSettings.java delete mode 100644 src/main/java/com/azure/cosmos/examples/BasicDemo.java delete mode 100644 src/main/java/com/azure/cosmos/examples/HelloWorldDemo.java delete mode 100644 src/main/java/com/azure/cosmos/rx/examples/multimaster/ConfigurationManager.java delete mode 100644 src/main/java/com/azure/cosmos/rx/examples/multimaster/Helpers.java delete mode 100644 src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/ConflictWorker.java delete mode 100644 src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Main.java delete mode 100644 src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/MultiMasterScenario.java delete mode 100644 src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Worker.java diff --git a/src/main/java/com/azure/cosmos/examples/AccountSettings.java b/src/main/java/com/azure/cosmos/examples/AccountSettings.java deleted file mode 100644 index e413f4e..0000000 --- a/src/main/java/com/azure/cosmos/examples/AccountSettings.java +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.cosmos.examples; - -import org.apache.commons.lang3.StringUtils; - -/** - * Contains the account configurations for Sample. 
- * - * For running tests, you can pass a customized endpoint configuration in one of the following - * ways: - *
    - *
  • -DACCOUNT_KEY="[your-key]" -DACCOUNT_HOST="[your-endpoint]" as JVM - * command-line option.
  • - *
  • You can set COSMOS_ACCOUNT_KEY and COSMOS_ACCOUNT_HOST as environment variables.
  • - *
- * - * If none of the above is set, emulator endpoint will be used. - * Emulator http cert is self signed. If you are using emulator, - * make sure emulator https certificate is imported - * to java trusted cert store: - * https://docs.microsoft.com/en-us/azure/cosmos-db/local-emulator-export-ssl-certificates - */ -public class AccountSettings { - // REPLACE MASTER_KEY and HOST with values from your Azure Cosmos DB account. - // The default values are credentials of the local emulator, which are not used in any production environment. - public static final String HOST = - System.getProperty("ACCOUNT_HOST", - StringUtils.defaultString(StringUtils.trimToNull( - System.getenv().get("COSMOS_ACCOUNT_HOST")), - "https://localhost:8081/")); - public static final String MASTER_KEY = - System.getProperty("ACCOUNT_KEY", - StringUtils.defaultString(StringUtils.trimToNull( - System.getenv().get("COSMOS_ACCOUNT_KEY")), - "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==")); -} diff --git a/src/main/java/com/azure/cosmos/examples/BasicDemo.java b/src/main/java/com/azure/cosmos/examples/BasicDemo.java deleted file mode 100644 index 4db349e..0000000 --- a/src/main/java/com/azure/cosmos/examples/BasicDemo.java +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
-package com.azure.cosmos.examples; - -import com.azure.cosmos.CosmosAsyncClient; -import com.azure.cosmos.CosmosAsyncContainer; -import com.azure.cosmos.CosmosAsyncDatabase; -import com.azure.cosmos.CosmosAsyncItemResponse; -import com.azure.cosmos.CosmosClientException; -import com.azure.cosmos.CosmosContainerProperties; -import com.azure.cosmos.CosmosContinuablePagedFlux; -import com.azure.cosmos.implementation.CosmosItemProperties; -import com.azure.cosmos.FeedOptions; -import com.azure.cosmos.FeedResponse; -import com.azure.cosmos.PartitionKey; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; - -public class BasicDemo { - - private static final String DATABASE_NAME = "test_db"; - private static final String CONTAINER_NAME = "test_container"; - private CosmosAsyncClient client; - private CosmosAsyncDatabase database; - private CosmosAsyncContainer container; - - public static void main(String[] args) { - BasicDemo demo = new BasicDemo(); - demo.start(); - } - - private void start() { - // Get client - client = CosmosAsyncClient.cosmosClientBuilder() - .setEndpoint(AccountSettings.HOST) - .setKey(AccountSettings.MASTER_KEY) - .buildAsyncClient(); - - //CREATE a database and a container - createDbAndContainerBlocking(); - - //Get a proxy reference to container - container = client.getDatabase(DATABASE_NAME).getContainer(CONTAINER_NAME); - - CosmosAsyncContainer container = client.getDatabase(DATABASE_NAME).getContainer(CONTAINER_NAME); - TestObject testObject = new TestObject("item_new_id_1", "test", "test description", "US"); - TestObject testObject2 = new TestObject("item_new_id_2", "test2", "test description2", "CA"); - - //CREATE an Item async - - Mono> itemResponseMono = container.createItem(testObject); - //CREATE another Item async - Mono> itemResponseMono1 = container.createItem(testObject2); - - //Wait for completion - try { - itemResponseMono.doOnError(throwable -> log("CREATE item 1", throwable)) - 
.mergeWith(itemResponseMono1) - .doOnError(throwable -> log("CREATE item 2 ", throwable)) - .doOnComplete(() -> log("Items created")) - .publishOn(Schedulers.elastic()) - .blockLast(); - } catch (RuntimeException e) { - log("Couldn't create items due to above exceptions"); - } - - createAndReplaceItem(); - queryItems(); - queryWithContinuationToken(); - - //Close client - client.close(); - log("Completed"); - } - - private void createAndReplaceItem() { - TestObject replaceObject = new TestObject("item_new_id_3", "test3", "test description3", "JP"); - TestObject properties = null; - //CREATE item sync - try { - properties = container.createItem(replaceObject) - .doOnError(throwable -> log("CREATE 3", throwable)) - .publishOn(Schedulers.elastic()) - .block() - .getResource(); - } catch (RuntimeException e) { - log("Couldn't create items due to above exceptions"); - } - if (properties != null) { - replaceObject.setName("new name test3"); - - //REPLACE the item and wait for completion - container.replaceItem(replaceObject, - properties.getId(), - new PartitionKey(replaceObject.getCountry())) - .block(); - } - } - - private void createDbAndContainerBlocking() { - client.createDatabaseIfNotExists(DATABASE_NAME) - .doOnSuccess(cosmosDatabaseResponse -> log("Database: " + cosmosDatabaseResponse.getDatabase().getId())) - .flatMap(dbResponse -> dbResponse.getDatabase() - .createContainerIfNotExists(new CosmosContainerProperties(CONTAINER_NAME, - "/country"))) - .doOnSuccess(cosmosContainerResponse -> log("Container: " + cosmosContainerResponse.getContainer().getId())) - .doOnError(throwable -> log(throwable.getMessage())) - .publishOn(Schedulers.elastic()) - .block(); - } - - private void queryItems() { - log("+ Querying the collection "); - String query = "SELECT * from root"; - FeedOptions options = new FeedOptions(); - options.setMaxDegreeOfParallelism(2); - CosmosContinuablePagedFlux queryFlux = container.queryItems(query, options, TestObject.class); - - 
queryFlux.byPage() - .publishOn(Schedulers.elastic()) - .toIterable() - .forEach(cosmosItemFeedResponse -> { - log(cosmosItemFeedResponse.getResults()); - }); - - } - - private void queryWithContinuationToken() { - log("+ Query with paging using continuation token"); - String query = "SELECT * from root r "; - FeedOptions options = new FeedOptions(); - options.populateQueryMetrics(true); - options.maxItemCount(1); - String continuation = null; - do { - options.requestContinuation(continuation); - CosmosContinuablePagedFlux queryFlux = container.queryItems(query, options, TestObject.class); - FeedResponse page = queryFlux.byPage().blockFirst(); - assert page != null; - log(page.getResults()); - continuation = page.getContinuationToken(); - } while (continuation != null); - - } - - private void log(Object object) { - System.out.println(object); - } - - private void log(String msg, Throwable throwable) { - if (throwable instanceof CosmosClientException) { - log(msg + ": " + ((CosmosClientException) throwable).getStatusCode()); - } - } - - static class TestObject { - String id; - String name; - String description; - String country; - - public TestObject() { - } - - public TestObject(String id, String name, String description, String country) { - this.id = id; - this.name = name; - this.description = description; - this.country = country; - } - - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public String getCountry() { - return country; - } - - public void setCountry(String country) { - this.country = country; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - } -} diff --git a/src/main/java/com/azure/cosmos/examples/HelloWorldDemo.java b/src/main/java/com/azure/cosmos/examples/HelloWorldDemo.java 
deleted file mode 100644 index f571a11..0000000 --- a/src/main/java/com/azure/cosmos/examples/HelloWorldDemo.java +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -package com.azure.cosmos.examples; - -import com.azure.cosmos.CosmosAsyncClient; -import com.azure.cosmos.CosmosAsyncContainer; -import com.azure.cosmos.CosmosClientBuilder; -import com.azure.cosmos.implementation.CosmosItemProperties; -import com.azure.cosmos.PartitionKey; -import reactor.core.publisher.Mono; - -import java.io.IOException; - -public class HelloWorldDemo { - public static void main(String[] args) { - new HelloWorldDemo().runDemo(); - } - - void runDemo() { - // Create a new CosmosAsyncClient via the CosmosClientBuilder - // It only requires endpoint and key, but other useful settings are available - CosmosAsyncClient client = new CosmosClientBuilder() - .setEndpoint("") - .setKey("") - .buildAsyncClient(); - - // Get a reference to the container - // This will create (or read) a database and its container. 
- CosmosAsyncContainer container = client.createDatabaseIfNotExists("contoso-travel") - // TIP: Our APIs are Reactor Core based, so try to chain your calls - .flatMap(response -> response.getDatabase() - .createContainerIfNotExists("passengers", "/id")) - .flatMap(response -> Mono.just(response.getContainer())) - .block(); // Blocking for demo purposes (avoid doing this in production unless you must) - - // Create an item - container.createItem(new Passenger("carla.davis@outlook.com", "Carla Davis", "SEA", "IND")) - .flatMap(response -> { - System.out.println("Created item: " + response.getResource()); - // Read that item 👓 - return container.readItem(response.getResource().getId(), - new PartitionKey(response.getResource().getId()), - Passenger.class); - }) - .flatMap(response -> { - System.out.println("Read item: " + response.getResource()); - // Replace that item 🔁 - Passenger p = response.getResource(); - p.setDestination("SFO"); - return container.replaceItem(p, - response.getResource().getId(), - new PartitionKey(response.getResource().getId())); - }) - // delete that item 💣 - .flatMap(response -> container.deleteItem(response.getResource().getId(), - new PartitionKey(response.getResource().getId()))) - .block(); // Blocking for demo purposes (avoid doing this in production unless you must) - } - - // Just a random object for demo's sake - public class Passenger { - String id; - String name; - String destination; - String source; - - public Passenger(String id, String name, String destination, String source) { - this.id = id; - this.name = name; - this.destination = destination; - this.source = source; - } - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getDestination() { - return destination; - } - - public void setDestination(String destination) { - this.destination = destination; 
- } - - public String getSource() { - return source; - } - - public void setSource(String source) { - this.source = source; - } - } -} diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java index 05e6934..948c95a 100644 --- a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java @@ -119,31 +119,6 @@ public static ChangeFeedProcessor getChangeFeedProcessor(String hostName, Cosmos .build(); } - /* - public static ChangeFeedProcessor getChangeFeedProcessorCustomPOJO(String hostName, CosmosAsyncContainer feedContainer, CosmosAsyncContainer leaseContainer) { - return ChangeFeedProcessor.changeFeedProcessorBuilder() - .setHostName(hostName) - .setFeedContainer(feedContainer) - .setLeaseContainer(leaseContainer) - .setHandleChanges((List docs) -> { - System.out.println("--->setHandleChanges() START"); - - for (CustomPOJO document : docs) { - try { - System.out.println("---->DOCUMENT RECEIVED: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() - .writeValueAsString(document)); - } catch (JsonProcessingException e) { - e.printStackTrace(); - } - } - System.out.println("--->handleChanges() END"); - - }) - .build(); - } - - - */ public static CosmosAsyncClient getCosmosClient() { return new CosmosClientBuilder() @@ -186,9 +161,7 @@ public static CosmosAsyncContainer createNewCollection(CosmosAsyncClient client, } CosmosContainerProperties containerSettings = new CosmosContainerProperties(collectionName, "/id"); - CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); - containerResponse = databaseLink.createContainer(containerSettings, 10000, requestOptions).block(); if (containerResponse == null) { @@ -293,13 +266,8 @@ public static void createNewDocumentsCustomPOJO(CosmosAsyncContainer containerCl public 
static void createNewDocumentsJSON(CosmosAsyncContainer containerClient, int count, Duration delay) { String suffix = RandomStringUtils.randomAlphabetic(10); for (int i = 0; i <= count; i++) { -// CosmosItemProperties document = new CosmosItemProperties(); -// document.setId(String.format("0%d-%s", i, suffix)); - String jsonString = - "{\n" + - "\"id\" : \"" + String.format("0%d-%s", i, suffix) + "\"\n" + - "}"; + String jsonString = "{\"id\" : \"" + String.format("0%d-%s", i, suffix) + "\"}"; ObjectMapper mapper = new ObjectMapper(); JsonNode document = null; diff --git a/src/main/java/com/azure/cosmos/rx/examples/multimaster/ConfigurationManager.java b/src/main/java/com/azure/cosmos/rx/examples/multimaster/ConfigurationManager.java deleted file mode 100644 index 2c3b676..0000000 --- a/src/main/java/com/azure/cosmos/rx/examples/multimaster/ConfigurationManager.java +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.cosmos.rx.examples.multimaster; - -import java.util.Properties; - -public class ConfigurationManager { - public static Properties getAppSettings() { - return System.getProperties(); - } -} diff --git a/src/main/java/com/azure/cosmos/rx/examples/multimaster/Helpers.java b/src/main/java/com/azure/cosmos/rx/examples/multimaster/Helpers.java deleted file mode 100644 index 0f7748a..0000000 --- a/src/main/java/com/azure/cosmos/rx/examples/multimaster/Helpers.java +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.cosmos.rx.examples.multimaster; - -import com.azure.cosmos.implementation.AsyncDocumentClient; -import com.azure.cosmos.CosmosClientException; -import com.azure.cosmos.implementation.Database; -import com.azure.cosmos.implementation.DocumentCollection; -import com.azure.cosmos.implementation.ResourceResponse; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; - -public class Helpers { - - static public String createDocumentCollectionUri(String databaseName, String collectionName) { - return String.format("/dbs/%s/colls/%s", databaseName, collectionName); - } - - static public String createDatabaseUri(String databaseName) { - return String.format("/dbs/%s", databaseName); - } - - static public Mono createDatabaseIfNotExists(AsyncDocumentClient client, String databaseName) { - - return client.readDatabase("/dbs/" + databaseName, null) - .onErrorResume( - e -> { - if (e instanceof CosmosClientException) { - CosmosClientException dce = (CosmosClientException) e; - if (dce.getStatusCode() == 404) { - // if doesn't exist create it - - Database d = new Database(); - d.setId(databaseName); - - return client.createDatabase(d, null); - } - } - - return Mono.error(e); - } - ).map(ResourceResponse::getResource).single(); - } - - static public Mono createCollectionIfNotExists(AsyncDocumentClient client, String databaseName, String collectionName) { - return client.readCollection(createDocumentCollectionUri(databaseName, collectionName), null) - .onErrorResume( - e -> { - if (e instanceof CosmosClientException) { - CosmosClientException dce = (CosmosClientException) e; - if (dce.getStatusCode() == 404) { - // if doesn't exist create it - - DocumentCollection collection = new DocumentCollection(); - collection.setId(collectionName); - - return client.createCollection(createDatabaseUri(databaseName), collection, null); - } - } - - return Mono.error(e); - } - ).map(ResourceResponse::getResource).single(); - } - - static public Mono 
createCollectionIfNotExists(AsyncDocumentClient client, String databaseName, DocumentCollection collection) { - return client.readCollection(createDocumentCollectionUri(databaseName, collection.getId()), null) - .onErrorResume( - e -> { - if (e instanceof CosmosClientException) { - CosmosClientException dce = (CosmosClientException) e; - if (dce.getStatusCode() == 404) { - // if doesn't exist create it - - return client.createCollection(createDatabaseUri(databaseName), collection, null); - } - } - - return Mono.error(e); - } - ).map(ResourceResponse::getResource).single(); - } -} diff --git a/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/ConflictWorker.java b/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/ConflictWorker.java deleted file mode 100644 index f5bdcf8..0000000 --- a/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/ConflictWorker.java +++ /dev/null @@ -1,858 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.cosmos.rx.examples.multimaster.samples; - -import com.azure.cosmos.AccessCondition; -import com.azure.cosmos.AccessConditionType; -import com.azure.cosmos.BridgeInternal; -import com.azure.cosmos.ConflictResolutionPolicy; -import com.azure.cosmos.CosmosClientException; -import com.azure.cosmos.FeedResponse; -import com.azure.cosmos.Resource; -import com.azure.cosmos.implementation.AsyncDocumentClient; -import com.azure.cosmos.implementation.Conflict; -import com.azure.cosmos.implementation.Document; -import com.azure.cosmos.implementation.DocumentCollection; -import com.azure.cosmos.implementation.RequestOptions; -import com.azure.cosmos.implementation.ResourceResponse; -import com.azure.cosmos.implementation.StoredProcedure; -import com.azure.cosmos.rx.examples.multimaster.Helpers; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Scheduler; -import reactor.core.scheduler.Schedulers; - -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -public class ConflictWorker { - private static Logger logger = LoggerFactory.getLogger(ConflictWorker.class); - - private final Scheduler schedulerForBlockingWork; - private final List clients; - private final String basicCollectionUri; - private final String manualCollectionUri; - private final String lwwCollectionUri; - private final String udpCollectionUri; - private final String databaseName; - private final String basicCollectionName; - private final String manualCollectionName; - private final String lwwCollectionName; - private final String udpCollectionName; - private final ExecutorService executor; - - public ConflictWorker(String databaseName, String 
basicCollectionName, String manualCollectionName, String lwwCollectionName, String udpCollectionName) { - this.clients = new ArrayList<>(); - this.basicCollectionUri = Helpers.createDocumentCollectionUri(databaseName, basicCollectionName); - this.manualCollectionUri = Helpers.createDocumentCollectionUri(databaseName, manualCollectionName); - this.lwwCollectionUri = Helpers.createDocumentCollectionUri(databaseName, lwwCollectionName); - this.udpCollectionUri = Helpers.createDocumentCollectionUri(databaseName, udpCollectionName); - - this.databaseName = databaseName; - this.basicCollectionName = basicCollectionName; - this.manualCollectionName = manualCollectionName; - this.lwwCollectionName = lwwCollectionName; - this.udpCollectionName = udpCollectionName; - - this.executor = Executors.newFixedThreadPool(100); - this.schedulerForBlockingWork = Schedulers.fromExecutor(executor); - } - - public void addClient(AsyncDocumentClient client) { - this.clients.add(client); - } - - private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, DocumentCollection collection) { - return Helpers.createCollectionIfNotExists(createClient, this.databaseName, collection) - .subscribeOn(schedulerForBlockingWork).block(); - } - - private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, String collectionName) { - - return Helpers.createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName) - .subscribeOn(schedulerForBlockingWork).block(); - } - - private DocumentCollection getCollectionDefForManual(String id) { - DocumentCollection collection = new DocumentCollection(); - collection.setId(id); - ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy(); - collection.setConflictResolutionPolicy(policy); - return collection; - } - - private DocumentCollection getCollectionDefForLastWinWrites(String id, String conflictResolutionPath) { - 
DocumentCollection collection = new DocumentCollection(); - collection.setId(id); - ConflictResolutionPolicy policy = ConflictResolutionPolicy.createLastWriterWinsPolicy(conflictResolutionPath); - collection.setConflictResolutionPolicy(policy); - return collection; - } - - private DocumentCollection getCollectionDefForCustom(String id, String storedProc) { - DocumentCollection collection = new DocumentCollection(); - collection.setId(id); - ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy(storedProc); - collection.setConflictResolutionPolicy(policy); - return collection; - } - - public void initialize() throws Exception { - AsyncDocumentClient createClient = this.clients.get(0); - - Helpers.createDatabaseIfNotExists(createClient, this.databaseName).subscribeOn(schedulerForBlockingWork).block(); - - DocumentCollection basic = createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName); - - DocumentCollection manualCollection = createCollectionIfNotExists(createClient, - Helpers.createDatabaseUri(this.databaseName), getCollectionDefForManual(this.manualCollectionName)); - - DocumentCollection lwwCollection = createCollectionIfNotExists(createClient, - Helpers.createDatabaseUri(this.databaseName), getCollectionDefForLastWinWrites(this.lwwCollectionName, "/regionId")); - - DocumentCollection udpCollection = createCollectionIfNotExists(createClient, - Helpers.createDatabaseUri(this.databaseName), getCollectionDefForCustom(this.udpCollectionName, - String.format("dbs/%s/colls/%s/sprocs/%s", this.databaseName, this.udpCollectionName, "resolver"))); - - StoredProcedure lwwSproc = new StoredProcedure(); - lwwSproc.setId("resolver"); - lwwSproc.setBody(IOUtils.toString( - getClass().getClassLoader().getResourceAsStream("resolver-storedproc.txt"), "UTF-8")); - - lwwSproc = - getResource(createClient.upsertStoredProcedure( - Helpers.createDocumentCollectionUri(this.databaseName, this.udpCollectionName), lwwSproc, null)); 
- - } - - private T getResource(Mono> obs) { - return obs.subscribeOn(schedulerForBlockingWork).single().block().getResource(); - } - - public void runManualConflict() throws Exception { - logger.info("\r\nInsert Conflict\r\n"); - this.runInsertConflictOnManual(); - - logger.info("\r\nUPDATE Conflict\r\n"); - this.runUpdateConflictOnManual(); - - logger.info("\r\nDELETE Conflict\r\n"); - this.runDeleteConflictOnManual(); - } - - public void runLWWConflict() throws Exception { - logger.info("\r\nInsert Conflict\r\n"); - this.runInsertConflictOnLWW(); - - logger.info("\r\nUPDATE Conflict\r\n"); - this.runUpdateConflictOnLWW(); - - logger.info("\r\nDELETE Conflict\r\n"); - this.runDeleteConflictOnLWW(); - } - - public void runUDPConflict() throws Exception { - logger.info("\r\nInsert Conflict\r\n"); - this.runInsertConflictOnUdp(); - - logger.info("\r\nUPDATE Conflict\r\n"); - this.runUpdateConflictOnUdp(); - - logger.info("\r\nDELETE Conflict\r\n"); - this.runDeleteConflictOnUdp(); - } - - public void runInsertConflictOnManual() throws Exception { - do { - logger.info("1) Performing conflicting insert across {} regions on {}", this.clients.size(), this.manualCollectionName); - - ArrayList> insertTask = new ArrayList<>(); - - Document conflictDocument = new Document(); - conflictDocument.setId(UUID.randomUUID().toString()); - - int index = 0; - for (AsyncDocumentClient client : this.clients) { - insertTask.add(this.tryInsertDocument(client, this.manualCollectionUri, conflictDocument, index++)); - } - - List conflictDocuments = Flux.merge(insertTask).collectList().subscribeOn(schedulerForBlockingWork).single().block(); - - if (conflictDocuments.size() == this.clients.size()) { - logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size()); - - for (Document conflictingInsert : conflictDocuments) { - this.validateManualConflict(this.clients, conflictingInsert); - } - break; - } else { - logger.info("Retrying insert to induce 
conflicts"); - } - } while (true); - } - - public void runUpdateConflictOnManual() throws Exception { - do { - Document conflictDocument = new Document(); - conflictDocument.setId(UUID.randomUUID().toString()); - - - conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0) - .block(); - - TimeUnit.SECONDS.sleep(1);//1 Second for write to sync. - - - logger.info("1) Performing conflicting update across 3 regions on {}", this.manualCollectionName); - - ArrayList> updateTask = new ArrayList<>(); - - int index = 0; - for (AsyncDocumentClient client : this.clients) { - updateTask.add(this.tryUpdateDocument(client, this.manualCollectionUri, conflictDocument, index++)); - } - - List conflictDocuments = Flux.merge(updateTask).collectList().single().block(); - - if (conflictDocuments.size() > 1) { - logger.info("2) Caused {} updated conflicts, verifying conflict resolution", conflictDocuments.size()); - - for (Document conflictingUpdate : conflictDocuments) { - this.validateManualConflict(this.clients, conflictingUpdate); - } - break; - } else { - logger.info("Retrying update to induce conflicts"); - } - } while (true); - } - - public void runDeleteConflictOnManual() throws Exception { - do { - Document conflictDocument = new Document(); - conflictDocument.setId(UUID.randomUUID().toString()); - - conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0) - .block(); - - TimeUnit.SECONDS.sleep(10);//1 Second for write to sync. 
- - logger.info("1) Performing conflicting delete across 3 regions on {}", this.manualCollectionName); - - ArrayList> deleteTask = new ArrayList<>(); - - int index = 0; - for (AsyncDocumentClient client : this.clients) { - deleteTask.add(this.tryDeleteDocument(client, this.manualCollectionUri, conflictDocument, index++)); - } - - List conflictDocuments = Flux.merge(deleteTask).collectList() - .subscribeOn(schedulerForBlockingWork) - .single().block(); - - if (conflictDocuments.size() > 1) { - logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size()); - - for (Document conflictingDelete : conflictDocuments) { - this.validateManualConflict(this.clients, conflictingDelete); - } - - break; - } else { - logger.info("Retrying update to induce conflicts"); - } - } while (true); - } - - public void runInsertConflictOnLWW() throws Exception { - do { - logger.info("Performing conflicting insert across 3 regions"); - - ArrayList> insertTask = new ArrayList<>(); - - Document conflictDocument = new Document(); - conflictDocument.setId(UUID.randomUUID().toString()); - - int index = 0; - for (AsyncDocumentClient client : this.clients) { - insertTask.add(this.tryInsertDocument(client, this.lwwCollectionUri, conflictDocument, index++)); - } - - List conflictDocuments = Flux.merge(insertTask).collectList().single().block(); - - - if (conflictDocuments.size() > 1) { - logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size()); - - this.validateLWW(this.clients, conflictDocuments); - - break; - } else { - logger.info("Retrying insert to induce conflicts"); - } - } while (true); - } - - public void runUpdateConflictOnLWW() throws Exception { - do { - Document conflictDocument = new Document(); - conflictDocument.setId(UUID.randomUUID().toString()); - - conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0) - .block(); - - - TimeUnit.SECONDS.sleep(1); //1 Second 
for write to sync. - - logger.info("1) Performing conflicting update across {} regions on {}", this.clients.size(), this.lwwCollectionUri); - - ArrayList> insertTask = new ArrayList<>(); - - int index = 0; - for (AsyncDocumentClient client : this.clients) { - insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++)); - } - - List conflictDocuments = Flux.merge(insertTask).collectList().single().block(); - - - if (conflictDocuments.size() > 1) { - logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size()); - - this.validateLWW(this.clients, conflictDocuments); - - break; - } else { - logger.info("Retrying insert to induce conflicts"); - } - } while (true); - } - - public void runDeleteConflictOnLWW() throws Exception { - do { - Document conflictDocument = new Document(); - conflictDocument.setId(UUID.randomUUID().toString()); - - conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0) - .block(); - - - TimeUnit.SECONDS.sleep(1); //1 Second for write to sync. - - logger.info("1) Performing conflicting delete across {} regions on {}", this.clients.size(), this.lwwCollectionUri); - - ArrayList> insertTask = new ArrayList<>(); - - int index = 0; - for (AsyncDocumentClient client : this.clients) { - if (index % 2 == 1) { - //We delete from region 1, even though region 2 always win. - insertTask.add(this.tryDeleteDocument(client, this.lwwCollectionUri, conflictDocument, index++)); - } else { - insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++)); - } - } - - List conflictDocuments = Flux.merge(insertTask).collectList().single().block(); - - if (conflictDocuments.size() > 1) { - logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size()); - - //DELETE should always win. irrespective of LWW. 
- this.validateLWW(this.clients, conflictDocuments, true); - break; - } else { - logger.info("Retrying update/delete to induce conflicts"); - } - } while (true); - } - - public void runInsertConflictOnUdp() throws Exception { - do { - logger.info("1) Performing conflicting insert across 3 regions on {}", this.udpCollectionName); - - ArrayList> insertTask = new ArrayList<>(); - - Document conflictDocument = new Document(); - conflictDocument.setId(UUID.randomUUID().toString()); - - int index = 0; - for (AsyncDocumentClient client : this.clients) { - insertTask.add(this.tryInsertDocument(client, this.udpCollectionUri, conflictDocument, index++)); - } - - List conflictDocuments = Flux.merge(insertTask).collectList().single().block(); - - - if (conflictDocuments.size() > 1) { - logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size()); - - this.validateUDPAsync(this.clients, conflictDocuments); - - break; - } else { - logger.info("Retrying insert to induce conflicts"); - } - } while (true); - } - - public void runUpdateConflictOnUdp() throws Exception { - do { - Document conflictDocument = new Document(); - conflictDocument.setId(UUID.randomUUID().toString()); - - conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0) - .block(); - - TimeUnit.SECONDS.sleep(1); //1 Second for write to sync. 
- - logger.info("1) Performing conflicting update across 3 regions on {}", this.udpCollectionUri); - - ArrayList> updateTask = new ArrayList<>(); - - int index = 0; - for (AsyncDocumentClient client : this.clients) { - updateTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++)); - } - - List conflictDocuments = Flux.merge(updateTask).collectList().single().block(); - - - if (conflictDocuments.size() > 1) { - logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size()); - - this.validateUDPAsync(this.clients, conflictDocuments); - - break; - } else { - logger.info("Retrying update to induce conflicts"); - } - } while (true); - } - - public void runDeleteConflictOnUdp() throws Exception { - do { - Document conflictDocument = new Document(); - conflictDocument.setId(UUID.randomUUID().toString()); - - conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0) - .block(); - - TimeUnit.SECONDS.sleep(1); //1 Second for write to sync. - - logger.info("1) Performing conflicting update/delete across 3 regions on {}", this.udpCollectionUri); - - ArrayList> deleteTask = new ArrayList<>(); - - int index = 0; - for (AsyncDocumentClient client : this.clients) { - if (index % 2 == 1) { - //We delete from region 1, even though region 2 always win. - deleteTask.add(this.tryDeleteDocument(client, this.udpCollectionUri, conflictDocument, index++)); - } else { - deleteTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++)); - } - } - - List conflictDocuments = Flux.merge(deleteTask).collectList().single().block(); - - if (conflictDocuments.size() > 1) { - logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size()); - - //DELETE should always win. irrespective of LWW. 
- this.validateUDPAsync(this.clients, conflictDocuments, true); - break; - } else { - logger.info("Retrying update/delete to induce conflicts"); - } - } while (true); - } - - private Mono tryInsertDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) { - - logger.debug("region: {}", client.getWriteEndpoint()); - BridgeInternal.setProperty(document, "regionId", index); - BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint()); - return client.createDocument(collectionUri, document, null, false) - .onErrorResume(e -> { - if (hasDocumentClientException(e, 409)) { - return Mono.empty(); - } else { - return Mono.error(e); - } - }).map(ResourceResponse::getResource); - } - - private boolean hasDocumentClientException(Throwable e, int statusCode) { - if (e instanceof CosmosClientException) { - CosmosClientException dce = (CosmosClientException) e; - return dce.getStatusCode() == statusCode; - } - - return false; - } - - private boolean hasDocumentClientExceptionCause(Throwable e) { - while (e != null) { - if (e instanceof CosmosClientException) { - return true; - } - - e = e.getCause(); - } - return false; - } - - private boolean hasDocumentClientExceptionCause(Throwable e, int statusCode) { - while (e != null) { - if (e instanceof CosmosClientException) { - CosmosClientException dce = (CosmosClientException) e; - return dce.getStatusCode() == statusCode; - } - - e = e.getCause(); - } - - return false; - } - - private Mono tryUpdateDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) { - BridgeInternal.setProperty(document, "regionId", index); - BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint()); - - RequestOptions options = new RequestOptions(); - options.setAccessCondition(new AccessCondition()); - options.getAccessCondition().setType(AccessConditionType.IF_MATCH); - options.getAccessCondition().setCondition(document.getETag()); - - - return 
client.replaceDocument(document.getSelfLink(), document, null).onErrorResume(e -> { - - // pre condition failed - if (hasDocumentClientException(e, 412)) { - //Lost synchronously or not document yet. No conflict is induced. - return Mono.empty(); - - } - return Mono.error(e); - }).map(ResourceResponse::getResource); - } - - private Mono tryDeleteDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) { - BridgeInternal.setProperty(document, "regionId", index); - BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint()); - - RequestOptions options = new RequestOptions(); - options.setAccessCondition(new AccessCondition()); - options.getAccessCondition().setType(AccessConditionType.IF_MATCH); - options.getAccessCondition().setCondition(document.getETag()); - - - return client.deleteDocument(document.getSelfLink(), options).onErrorResume(e -> { - - // pre condition failed - if (hasDocumentClientException(e, 412)) { - //Lost synchronously. No conflict is induced. 
- return Mono.empty(); - - } - return Mono.error(e); - }).map(rr -> document); - } - - private void validateManualConflict(List clients, Document conflictDocument) throws Exception { - boolean conflictExists = false; - for (AsyncDocumentClient client : clients) { - conflictExists = this.validateManualConflict(client, conflictDocument); - } - - if (conflictExists) { - this.deleteConflict(conflictDocument); - } - } - - private boolean isDelete(Conflict conflict) { - return StringUtils.equalsIgnoreCase(conflict.getOperationKind(), "delete"); - } - - - private boolean equals(String a, String b) { - return StringUtils.equals(a, b); - } - - private boolean validateManualConflict(AsyncDocumentClient client, Document conflictDocument) throws Exception { - while (true) { - FeedResponse response = client.readConflicts(this.manualCollectionUri, null) - .take(1).single().block(); - - for (Conflict conflict : response.getResults()) { - if (!isDelete(conflict)) { - Document conflictDocumentContent = conflict.getResource(Document.class); - if (equals(conflictDocument.getId(), conflictDocumentContent.getId())) { - if (equals(conflictDocument.getResourceId(), conflictDocumentContent.getResourceId()) && - equals(conflictDocument.getETag(), conflictDocumentContent.getETag())) { - logger.info("Document from Region {} lost conflict @ {}", - conflictDocument.getId(), - conflictDocument.getInt("regionId"), - client.getReadEndpoint()); - return true; - } else { - try { - //Checking whether this is the winner. 
- Document winnerDocument = client.readDocument(conflictDocument.getSelfLink(), null) - .single().block().getResource(); - logger.info("Document from region {} won the conflict @ {}", - conflictDocument.getInt("regionId"), - client.getReadEndpoint()); - return false; - } - catch (Exception exception) { - if (hasDocumentClientException(exception, 404)) { - throw exception; - } else { - logger.info( - "Document from region {} not found @ {}", - conflictDocument.getInt("regionId"), - client.getReadEndpoint()); - } - } - } - } - } else { - if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) { - logger.info("DELETE conflict found @ {}", - client.getReadEndpoint()); - return false; - } - } - } - - logger.error("Document {} is not found in conflict feed @ {}, retrying", - conflictDocument.getId(), - client.getReadEndpoint()); - - TimeUnit.MILLISECONDS.sleep(500); - } - } - - private void deleteConflict(Document conflictDocument) { - AsyncDocumentClient delClient = clients.get(0); - - FeedResponse conflicts = delClient.readConflicts(this.manualCollectionUri, null).take(1).single().block(); - - for (Conflict conflict : conflicts.getResults()) { - if (!isDelete(conflict)) { - Document conflictContent = conflict.getResource(Document.class); - if (equals(conflictContent.getResourceId(), conflictDocument.getResourceId()) - && equals(conflictContent.getETag(), conflictDocument.getETag())) { - logger.info("Deleting manual conflict {} from region {}", - conflict.getSourceResourceId(), - conflictContent.getInt("regionId")); - delClient.deleteConflict(conflict.getSelfLink(), null) - .single().block(); - - } - } else if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) { - logger.info("Deleting manual conflict {} from region {}", - conflict.getSourceResourceId(), - conflictDocument.getInt("regionId")); - delClient.deleteConflict(conflict.getSelfLink(), null) - .single().block(); - } - } - } - - private void validateLWW(List clients, 
List conflictDocument) throws Exception { - validateLWW(clients, conflictDocument, false); - } - - - private void validateLWW(List clients, List conflictDocument, boolean hasDeleteConflict) throws Exception { - for (AsyncDocumentClient client : clients) { - this.validateLWW(client, conflictDocument, hasDeleteConflict); - } - } - - private void validateLWW(AsyncDocumentClient client, List conflictDocument, boolean hasDeleteConflict) throws Exception { - FeedResponse response = client.readConflicts(this.lwwCollectionUri, null) - .take(1).single().block(); - - if (response.getResults().size() != 0) { - logger.error("Found {} conflicts in the lww collection", response.getResults().size()); - return; - } - - if (hasDeleteConflict) { - do { - try { - client.readDocument(conflictDocument.get(0).getSelfLink(), null).single().block(); - - logger.error("DELETE conflict for document {} didnt win @ {}", - conflictDocument.get(0).getId(), - client.getReadEndpoint()); - - TimeUnit.MILLISECONDS.sleep(500); - } catch (Exception exception) { - if (!hasDocumentClientExceptionCause(exception)) { - throw exception; - } - - // NotFound - if (hasDocumentClientExceptionCause(exception, 404)) { - - logger.info("DELETE conflict won @ {}", client.getReadEndpoint()); - return; - } else { - logger.error("DELETE conflict for document {} didnt win @ {}", - conflictDocument.get(0).getId(), - client.getReadEndpoint()); - - TimeUnit.MILLISECONDS.sleep(500); - } - } - } while (true); - } - - Document winnerDocument = null; - - for (Document document : conflictDocument) { - if (winnerDocument == null || - winnerDocument.getInt("regionId") <= document.getInt("regionId")) { - winnerDocument = document; - } - } - - logger.info("Document from region {} should be the winner", - winnerDocument.getInt("regionId")); - - while (true) { - try { - Document existingDocument = client.readDocument(winnerDocument.getSelfLink(), null) - .single().block().getResource(); - - if (existingDocument.getInt("regionId") == 
winnerDocument.getInt("regionId")) { - logger.info("Winner document from region {} found at {}", - existingDocument.getInt("regionId"), - client.getReadEndpoint()); - break; - } else { - logger.error("Winning document version from region {} is not found @ {}, retrying...", - winnerDocument.getInt("regionId"), - client.getWriteEndpoint()); - TimeUnit.MILLISECONDS.sleep(500); - } - } catch (Exception e) { - logger.error("Winner document from region {} is not found @ {}, retrying...", - winnerDocument.getInt("regionId"), - client.getWriteEndpoint()); - TimeUnit.MILLISECONDS.sleep(500); - } - } - } - - private void validateUDPAsync(List clients, List conflictDocument) throws Exception { - validateUDPAsync(clients, conflictDocument, false); - } - - private void validateUDPAsync(List clients, List conflictDocument, boolean hasDeleteConflict) throws Exception { - for (AsyncDocumentClient client : clients) { - this.validateUDPAsync(client, conflictDocument, hasDeleteConflict); - } - } - - private String documentNameLink(String collectionId, String documentId) { - return String.format("dbs/%s/colls/%s/docs/%s", databaseName, collectionId, documentId); - } - - private void validateUDPAsync(AsyncDocumentClient client, List conflictDocument, boolean hasDeleteConflict) throws Exception { - FeedResponse response = client.readConflicts(this.udpCollectionUri, null).take(1).single().block(); - - if (response.getResults().size() != 0) { - logger.error("Found {} conflicts in the udp collection", response.getResults().size()); - return; - } - - if (hasDeleteConflict) { - do { - try { - client.readDocument( - documentNameLink(udpCollectionName, conflictDocument.get(0).getId()), null) - .single().block(); - - logger.error("DELETE conflict for document {} didnt win @ {}", - conflictDocument.get(0).getId(), - client.getReadEndpoint()); - - TimeUnit.MILLISECONDS.sleep(500); - - } catch (Exception exception) { - if (hasDocumentClientExceptionCause(exception, 404)) { - logger.info("DELETE 
conflict won @ {}", client.getReadEndpoint()); - return; - } else { - logger.error("DELETE conflict for document {} didnt win @ {}", - conflictDocument.get(0).getId(), - client.getReadEndpoint()); - - TimeUnit.MILLISECONDS.sleep(500); - } - } - } while (true); - } - - Document winnerDocument = null; - - for (Document document : conflictDocument) { - if (winnerDocument == null || - winnerDocument.getInt("regionId") <= document.getInt("regionId")) { - winnerDocument = document; - } - } - - logger.info("Document from region {} should be the winner", - winnerDocument.getInt("regionId")); - - while (true) { - try { - - Document existingDocument = client.readDocument( - documentNameLink(udpCollectionName, winnerDocument.getId()), null) - .single().block().getResource(); - - if (existingDocument.getInt("regionId") == winnerDocument.getInt( - ("regionId"))) { - logger.info("Winner document from region {} found at {}", - existingDocument.getInt("regionId"), - client.getReadEndpoint()); - break; - } else { - logger.error("Winning document version from region {} is not found @ {}, retrying...", - winnerDocument.getInt("regionId"), - client.getWriteEndpoint()); - TimeUnit.MILLISECONDS.sleep(500); - } - } catch (Exception e) { - logger.error("Winner document from region {} is not found @ {}, retrying...", - winnerDocument.getInt("regionId"), - client.getWriteEndpoint()); - TimeUnit.MILLISECONDS.sleep(500); - } - } - } - - public void shutdown() { - this.executor.shutdown(); - for(AsyncDocumentClient client: clients) { - client.close(); - } - } -} diff --git a/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Main.java b/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Main.java deleted file mode 100644 index becfb12..0000000 --- a/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Main.java +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.cosmos.rx.examples.multimaster.samples; - -import com.azure.cosmos.rx.examples.multimaster.ConfigurationManager; -import org.apache.commons.io.IOUtils; - -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; - - -public class Main { - public static void main(String[] args) throws Exception { - - if (args.length != 1) { - help(); - System.exit(1); - } - - try (InputStream inputStream = new FileInputStream(args[0])) { - ConfigurationManager.getAppSettings().load(inputStream); - System.out.println("Using file " + args[0] + " for the setting."); - } - - Main.runScenarios(); - } - - private static void runScenarios() throws Exception { - MultiMasterScenario scenario = new MultiMasterScenario(); - scenario.initialize(); - - scenario.runBasic(); - - scenario.runManualConflict(); - scenario.runLWW(); - scenario.runUDP(); - - System.out.println("Finished"); - - //shutting down the active the resources - scenario.shutdown(); - } - - private static void help() throws IOException { - System.out.println("Provide the path to setting file in the following format: "); - try (InputStream inputStream = - Main.class.getClassLoader() - .getResourceAsStream("multi-master-sample-config.properties")) { - - IOUtils.copy(inputStream, System.out); - - System.out.println(); - } catch (Exception e) { - throw e; - } - } -} diff --git a/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/MultiMasterScenario.java b/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/MultiMasterScenario.java deleted file mode 100644 index c3ba36a..0000000 --- a/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/MultiMasterScenario.java +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.cosmos.rx.examples.multimaster.samples; - -import com.azure.cosmos.implementation.AsyncDocumentClient; -import com.azure.cosmos.ConnectionPolicy; -import com.azure.cosmos.ConsistencyLevel; -import com.azure.cosmos.rx.examples.multimaster.ConfigurationManager; -import com.google.common.base.Preconditions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import reactor.core.publisher.Mono; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -public class MultiMasterScenario { - - private final static Logger logger = LoggerFactory.getLogger(MultiMasterScenario.class); - - final private String accountEndpoint; - final private String accountKey; - final private List workers; - final private ConflictWorker conflictWorker; - - public MultiMasterScenario() { - this.accountEndpoint = ConfigurationManager.getAppSettings().getProperty("endpoint"); - this.accountKey = ConfigurationManager.getAppSettings().getProperty("key"); - - String databaseName = ConfigurationManager.getAppSettings().getProperty("databaseName"); - String manualCollectionName = ConfigurationManager.getAppSettings().getProperty("manualCollectionName"); - String lwwCollectionName = ConfigurationManager.getAppSettings().getProperty("lwwCollectionName"); - String udpCollectionName = ConfigurationManager.getAppSettings().getProperty("udpCollectionName"); - String basicCollectionName = ConfigurationManager.getAppSettings().getProperty("basicCollectionName"); - String regionsAsString = ConfigurationManager.getAppSettings().getProperty("regions"); - Preconditions.checkNotNull(regionsAsString, "regions is required"); - String[] regions = regionsAsString.split(";"); - Preconditions.checkArgument(regions.length > 0, "at least one region is required"); - Preconditions.checkNotNull(accountEndpoint, "accountEndpoint is required"); - Preconditions.checkNotNull(accountKey, "accountKey is required"); - Preconditions.checkNotNull(databaseName, "databaseName 
is required"); - Preconditions.checkNotNull(manualCollectionName, "manualCollectionName is required"); - Preconditions.checkNotNull(lwwCollectionName, "lwwCollectionName is required"); - Preconditions.checkNotNull(udpCollectionName, "udpCollectionName is required"); - Preconditions.checkNotNull(basicCollectionName, "basicCollectionName is required"); - - this.workers = new ArrayList<>(); - this.conflictWorker = new ConflictWorker(databaseName, basicCollectionName, manualCollectionName, lwwCollectionName, udpCollectionName); - - for (String region : regions) { - ConnectionPolicy policy = new ConnectionPolicy(); - policy.setUsingMultipleWriteLocations(true); - policy.setPreferredLocations(Collections.singletonList(region)); - - AsyncDocumentClient client = - new AsyncDocumentClient.Builder() - .withMasterKeyOrResourceToken(this.accountKey) - .withServiceEndpoint(this.accountEndpoint) - .withConsistencyLevel(ConsistencyLevel.EVENTUAL) - .withConnectionPolicy(policy).build(); - - - workers.add(new Worker(client, databaseName, basicCollectionName)); - - conflictWorker.addClient(client); - } - } - - public void initialize() throws Exception { - this.conflictWorker.initialize(); - logger.info("Initialized collections."); - } - - public void runBasic() throws Exception { - logger.info("\n####################################################"); - logger.info("Basic Active-Active"); - logger.info("####################################################"); - - logger.info("1) Starting insert loops across multiple regions ..."); - - List> basicTask = new ArrayList<>(); - - int documentsToInsertPerWorker = 100; - - for (Worker worker : this.workers) { - basicTask.add(worker.runLoopAsync(documentsToInsertPerWorker)); - } - - Mono.when(basicTask).block(); - - basicTask.clear(); - - logger.info("2) Reading from every region ..."); - - int expectedDocuments = this.workers.size() * documentsToInsertPerWorker; - for (Worker worker : this.workers) { - 
basicTask.add(worker.readAllAsync(expectedDocuments)); - } - - Mono.when(basicTask).block(); - - basicTask.clear(); - - logger.info("3) Deleting all the documents ..."); - - this.workers.get(0).deleteAll(); - - logger.info("####################################################"); - } - - public void runManualConflict() throws Exception { - logger.info("\n####################################################"); - logger.info("Manual Conflict Resolution"); - logger.info("####################################################"); - - this.conflictWorker.runManualConflict(); - logger.info("####################################################"); - } - - public void runLWW() throws Exception { - logger.info("\n####################################################"); - logger.info("LWW Conflict Resolution"); - logger.info("####################################################"); - - this.conflictWorker.runLWWConflict(); - logger.info("####################################################"); - } - - public void runUDP() throws Exception { - logger.info("\n####################################################"); - logger.info("UDP Conflict Resolution"); - logger.info("####################################################"); - - this.conflictWorker.runUDPConflict(); - logger.info("####################################################"); - } - - public void shutdown() { - conflictWorker.shutdown(); - for(Worker worker: this.workers) { - worker.shutdown(); - } - } -} diff --git a/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Worker.java b/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Worker.java deleted file mode 100644 index 4addb75..0000000 --- a/src/main/java/com/azure/cosmos/rx/examples/multimaster/samples/Worker.java +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.cosmos.rx.examples.multimaster.samples; - - -import com.azure.cosmos.implementation.AsyncDocumentClient; -import com.azure.cosmos.CosmosClientException; -import com.azure.cosmos.implementation.Document; -import com.azure.cosmos.FeedOptions; -import com.azure.cosmos.FeedResponse; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Scheduler; -import reactor.core.scheduler.Schedulers; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -public class Worker { - private final static Logger logger = LoggerFactory.getLogger(Worker.class); - - private final AsyncDocumentClient client; - private final String documentCollectionUri; - - // scheduler for blocking work - private final Scheduler schedulerForBlockingWork; - private final ExecutorService executor; - - public Worker(AsyncDocumentClient client, String databaseName, String collectionName) { - this.client = client; - this.documentCollectionUri = String.format("/dbs/%s/colls/%s", databaseName, collectionName); - this.executor = Executors.newSingleThreadExecutor(); - this.schedulerForBlockingWork = Schedulers.fromExecutor(executor); - } - - public Mono runLoopAsync(int documentsToInsert) { - return Mono.defer(() -> { - - int iterationCount = 0; - - List latency = new ArrayList<>(); - while (iterationCount++ < documentsToInsert) { - long startTick = System.currentTimeMillis(); - - Document d = new Document(); - d.setId(UUID.randomUUID().toString()); - - this.client.createDocument(this.documentCollectionUri, d, null, false) - .subscribeOn(schedulerForBlockingWork).single().block(); - - long endTick = System.currentTimeMillis(); - - latency.add(endTick - startTick); - } - - Collections.sort(latency); - int p50Index = (latency.size() / 2); - - 
logger.info("Inserted {} documents at {} with p50 {} ms", - documentsToInsert, - this.client.getWriteEndpoint(), - latency.get(p50Index)); - - return Mono.empty(); - - }); - - } - - - public Mono readAllAsync(int expectedNumberOfDocuments) { - - return Mono.defer(() -> { - - while (true) { - int totalItemRead = 0; - FeedResponse response = null; - do { - - FeedOptions options = new FeedOptions(); - options.requestContinuation(response != null ? response.getContinuationToken() : null); - - response = this.client.readDocuments(this.documentCollectionUri, options).take(1) - .subscribeOn(schedulerForBlockingWork).single().block(); - - totalItemRead += response.getResults().size(); - } while (response.getContinuationToken() != null); - - if (totalItemRead < expectedNumberOfDocuments) { - logger.info("Total item read {} from {} is less than {}, retrying reads", - totalItemRead, - this.client.getReadEndpoint(), - expectedNumberOfDocuments); - - try { - TimeUnit.SECONDS.sleep(1); - } catch (InterruptedException e) { - logger.info("interrupted"); - break; - } - continue; - } else { - logger.info("READ {} items from {}", totalItemRead, this.client.getReadEndpoint()); - break; - } - } - - return Mono.empty(); - }); - } - - void deleteAll() { - List documents = new ArrayList<>(); - FeedResponse response = null; - do { - - FeedOptions options = new FeedOptions(); - options.requestContinuation(response != null ? 
response.getContinuationToken() : null); - - response = this.client.readDocuments(this.documentCollectionUri, options).take(1) - .subscribeOn(schedulerForBlockingWork).single().block(); - - documents.addAll(response.getResults()); - } while (response.getContinuationToken() != null); - - for (Document document : documents) { - try { - this.client.deleteDocument(document.getSelfLink(), null) - .subscribeOn(schedulerForBlockingWork).single().block(); - } catch (RuntimeException exEx) { - CosmosClientException dce = getDocumentClientExceptionCause(exEx); - - if (dce.getStatusCode() != 404) { - logger.info("Error occurred while deleting {} from {}", dce, client.getWriteEndpoint()); - } - } - } - - logger.info("Deleted all documents from region {}", this.client.getWriteEndpoint()); - } - - private CosmosClientException getDocumentClientExceptionCause(Throwable e) { - while (e != null) { - - if (e instanceof CosmosClientException) { - return (CosmosClientException) e; - } - - e = e.getCause(); - } - - return null; - } - - public void shutdown() { - executor.shutdown(); - client.close(); - } -} From c08471ec6341cdaf07122b188b1fde853e6a7b50 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Wed, 4 Mar 2020 18:50:41 -0800 Subject: [PATCH 009/110] Everything runs; added explanatory comments in SampleChangeFeedProcessor Re: POJO vs JSON for inserting items --- .idea/azure-cosmos-java-sql-api-samples.iml | 3 ++- pom.xml | 9 +-------- .../examples/changefeed/SampleChangeFeedProcessor.java | 9 +++++---- .../async/SampleStoredProcedureAsync.java | 3 +++ 4 files changed, 11 insertions(+), 13 deletions(-) diff --git a/.idea/azure-cosmos-java-sql-api-samples.iml b/.idea/azure-cosmos-java-sql-api-samples.iml index cc3de2b..5b80b1e 100644 --- a/.idea/azure-cosmos-java-sql-api-samples.iml +++ b/.idea/azure-cosmos-java-sql-api-samples.iml @@ -10,7 +10,7 @@ - + @@ -27,6 +27,7 @@ + diff --git a/pom.xml b/pom.xml index 475bf54..2c82b79 100644 --- a/pom.xml +++ b/pom.xml @@ -77,14 +77,7 @@ 
com.azure azure-cosmos - 4.0.1-beta.1.dev.20200228.2 - - - com.azure - azure-core - - + 4.0.1-beta.1 - \ No newline at end of file diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java index 948c95a..c99b4bb 100644 --- a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java @@ -60,16 +60,17 @@ public static void main (String[]args) { CosmosAsyncContainer leaseContainer = createNewLeaseCollection(client, DATABASE_NAME, COLLECTION_NAME + "-leases"); changeFeedProcessorInstance = getChangeFeedProcessor("SampleHost_1", feedContainer, leaseContainer); - System.out.println("Got here\n"); changeFeedProcessorInstance.start() .subscribeOn(Schedulers.elastic()) .doOnSuccess(aVoid -> { - System.out.println("!doOnSuccess!\n"); - createNewDocumentsJSON(feedContainer, 10, Duration.ofSeconds(3)); + //Insert 10 documents into the feed container + //createNewDocumentsCustomPOJO demonstrates how to insert a custom POJO into a Cosmos DB container as an item + //createNewDocumentsJSON demonstrates how to insert a JSON object into a Cosmos DB container as an item + createNewDocumentsCustomPOJO(feedContainer, 5, Duration.ofSeconds(3)); + createNewDocumentsJSON(feedContainer, 5, Duration.ofSeconds(3)); isWorkCompleted = true; }) .subscribe(); - System.out.println("and here\n"); long remainingWork = WAIT_FOR_WORK; while (!isWorkCompleted && remainingWork > 0) { diff --git a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java index 69013de..78932e8 100644 --- a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java +++ 
b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java @@ -14,6 +14,7 @@ import com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import java.util.UUID; @@ -126,6 +127,8 @@ private void readAllSprocs() throws Exception { final CountDownLatch completionLatch = new CountDownLatch(1); + + fluxResponse.flatMap(storedProcedureProperties -> { logger.info(String.format("Stored Procedure: %s\n",storedProcedureProperties.getId())); return Mono.empty(); From aa65b13f2da58dcc768dfdb9ae253185c0542205 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Thu, 5 Mar 2020 03:53:01 -0800 Subject: [PATCH 010/110] Cosmetic fixes --- .../crudquickstart/sync/SampleCRUDQuickstart.java | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java index 637879f..7fa2f0c 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java @@ -148,15 +148,13 @@ private void createFamilies(List families) throws Exception { // // Get request charge and other properties like latency, and diagnostics strings, etc. 
- logger.info(String.format("Created item with request charge of %.2f within" + - " duration %s", + System.out.println(String.format("Created item with request charge of %.2f within duration %s", item.getRequestCharge(), item.getRequestLatency())); + totalRequestCharge += item.getRequestCharge(); } - logger.info(String.format("Created %d items with total request " + - "charge of %.2f", - families.size(), - totalRequestCharge)); + System.out.println(String.format("Created %d items with total request charge of %.2f", + families.size(), totalRequestCharge)); } private void readItems(ArrayList familiesToCreate) { From 36ea3273922910430ff7df902bc904f68ceae0e7 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Thu, 5 Mar 2020 07:13:11 -0800 Subject: [PATCH 011/110] In SampleGroceryStore changefeed triggers three core functionalities --- .../workedappexample/SampleGroceryStore.java | 323 ++++++++++++++++++ 1 file changed, 323 insertions(+) create mode 100644 src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java diff --git a/src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java b/src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java new file mode 100644 index 0000000..21b5512 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java @@ -0,0 +1,323 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+package com.azure.cosmos.examples.workedappexample; + +import com.azure.cosmos.ChangeFeedProcessor; +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncContainerResponse; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosContainerProperties; +import com.azure.cosmos.CosmosContainerRequestOptions; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.implementation.Utils; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.scheduler.Schedulers; + +import java.text.DateFormat; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.time.Duration; +import java.util.Date; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Sample for Change Feed Processor. 
+ * + */ +public class SampleGroceryStore { + + public static int WAIT_FOR_WORK = 60000; + public static final String DATABASE_NAME = "db_" + RandomStringUtils.randomAlphabetic(7); + public static final String COLLECTION_NAME = "coll_" + RandomStringUtils.randomAlphabetic(7); + private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper(); + protected static Logger logger = LoggerFactory.getLogger(SampleGroceryStore.class.getSimpleName()); + + + private static ChangeFeedProcessor changeFeedProcessorInstance; + private static boolean isWorkCompleted = false; + + private static CosmosAsyncContainer typeContainer; + private static CosmosAsyncContainer expiryDateContainer; + + public static void main (String[]args) { + logger.info("BEGIN Sample"); + + try { + + System.out.println("-->CREATE DocumentClient"); + CosmosAsyncClient client = getCosmosClient(); + + System.out.println("-->CREATE Contoso Grocery Store database: " + DATABASE_NAME); + CosmosAsyncDatabase cosmosDatabase = createNewDatabase(client, DATABASE_NAME); + + System.out.println("-->CREATE container for store inventory: " + COLLECTION_NAME); + CosmosAsyncContainer feedContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME, "/id"); + + System.out.println("-->CREATE container for lease: " + COLLECTION_NAME + "-leases"); + CosmosAsyncContainer leaseContainer = createNewLeaseCollection(client, DATABASE_NAME, COLLECTION_NAME + "-leases"); + + System.out.println("-->CREATE container for materialized view partitioned by 'type': " + COLLECTION_NAME + "-leases"); + typeContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME + "-pktype", "/type"); + + System.out.println("-->CREATE container for materialized view with aggregation rule based on days until expiration " + COLLECTION_NAME + "-leases"); + expiryDateContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME + "-pkexpiryDate", "/expiryDaysRemaining"); + + changeFeedProcessorInstance = 
getChangeFeedProcessor("SampleHost_1", feedContainer, leaseContainer); + changeFeedProcessorInstance.start() + .subscribeOn(Schedulers.elastic()) + .doOnSuccess(aVoid -> { + //Insert 10 documents into the feed container + //createNewDocumentsJSON demonstrates how to insert a JSON object into a Cosmos DB container as an item + createNewDocumentsJSON(feedContainer, 10, Duration.ofSeconds(3)); + isWorkCompleted = true; + }) + .subscribe(); + + long remainingWork = WAIT_FOR_WORK; + while (!isWorkCompleted && remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + + if (isWorkCompleted) { + if (changeFeedProcessorInstance != null) { + changeFeedProcessorInstance.stop().subscribe(); + } + } else { + throw new RuntimeException("The change feed processor initialization and automatic create document feeding process did not complete in the expected time"); + } + + System.out.println("-->DELETE sample's database: " + DATABASE_NAME); + deleteDatabase(cosmosDatabase); + + Thread.sleep(500); + + } catch (Exception e) { + e.printStackTrace(); + } + + System.out.println("END Sample"); + } + + public static ChangeFeedProcessor getChangeFeedProcessor(String hostName, CosmosAsyncContainer feedContainer, CosmosAsyncContainer leaseContainer) { + return ChangeFeedProcessor.changeFeedProcessorBuilder() + .setHostName(hostName) + .setFeedContainer(feedContainer) + .setLeaseContainer(leaseContainer) + .setHandleChanges((List docs) -> { + //System.out.println("--->setHandleChanges() START"); + + for (JsonNode document : docs) { + /* System.out.println("---->DOCUMENT RECEIVED: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() + .writeValueAsString(document)); */ + //Each document update from the feed container branches out to all three user services + updateInventoryAlertService(document); + updateInventoryTypeMaterializedView(document); + updateInventoryExpiryDateAggregationPolicyMaterializedView(document); + + //Forward document => + } + 
//System.out.println("--->handleChanges() END"); + + }) + .build(); + } + + private static void updateInventoryAlertService(JsonNode document) { + System.out.println("Alert: Added new item of type " + document.get("type") + "\n"); + } + + private static void updateInventoryTypeMaterializedView(JsonNode document) { + typeContainer.createItem(document).subscribe(); + } + + private static void updateInventoryExpiryDateAggregationPolicyMaterializedView(JsonNode document) { + ObjectMapper mapper = new ObjectMapper(); + JsonNode transformed_document = null; + + //Deep-copy the input document + try { + transformed_document = document.deepCopy(); + } catch (Exception e) { + e.printStackTrace(); + } + + DateFormat formatter = new SimpleDateFormat("yyyy-MM-dd"); + + try { + long days_passed = TimeUnit.MILLISECONDS.toDays + ( + ((Date) formatter.parse("2020-03-30")).getTime() - ((Date) formatter.parse(document.get("expiryDate").textValue())).getTime() + ); + + ((ObjectNode)transformed_document).remove("expiryDate"); + ((ObjectNode) transformed_document).put("expiryDaysRemaining", String.format("%d", days_passed)); + + expiryDateContainer.createItem(transformed_document).subscribe(); + } catch (ParseException e) { + e.printStackTrace(); + } + typeContainer.createItem(document).subscribe(); + } + + public static CosmosAsyncClient getCosmosClient() { + + return new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(ConnectionPolicy.getDefaultPolicy()) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildAsyncClient(); + } + + public static CosmosAsyncDatabase createNewDatabase(CosmosAsyncClient client, String databaseName) { + return client.createDatabaseIfNotExists(databaseName).block().getDatabase(); + } + + public static void deleteDatabase(CosmosAsyncDatabase cosmosDatabase) { + cosmosDatabase.delete().block(); + } + + public static CosmosAsyncContainer createNewCollection(CosmosAsyncClient client, 
String databaseName, String collectionName, String partitionKey) { + CosmosAsyncDatabase databaseLink = client.getDatabase(databaseName); + CosmosAsyncContainer collectionLink = databaseLink.getContainer(collectionName); + CosmosAsyncContainerResponse containerResponse = null; + + try { + containerResponse = collectionLink.read().block(); + + if (containerResponse != null) { + throw new IllegalArgumentException(String.format("Collection %s already exists in database %s.", collectionName, databaseName)); + } + } catch (RuntimeException ex) { + if (ex instanceof CosmosClientException) { + CosmosClientException cosmosClientException = (CosmosClientException) ex; + + if (cosmosClientException.getStatusCode() != 404) { + throw ex; + } + } else { + throw ex; + } + } + + CosmosContainerProperties containerSettings = new CosmosContainerProperties(collectionName, partitionKey); + CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); + containerResponse = databaseLink.createContainer(containerSettings, 10000, requestOptions).block(); + + if (containerResponse == null) { + throw new RuntimeException(String.format("Failed to create collection %s in database %s.", collectionName, databaseName)); + } + + return containerResponse.getContainer(); + } + + public static CosmosAsyncContainer createNewLeaseCollection(CosmosAsyncClient client, String databaseName, String leaseCollectionName) { + CosmosAsyncDatabase databaseLink = client.getDatabase(databaseName); + CosmosAsyncContainer leaseCollectionLink = databaseLink.getContainer(leaseCollectionName); + CosmosAsyncContainerResponse leaseContainerResponse = null; + + try { + leaseContainerResponse = leaseCollectionLink.read().block(); + + if (leaseContainerResponse != null) { + leaseCollectionLink.delete().block(); + + try { + Thread.sleep(1000); + } catch (InterruptedException ex) { + ex.printStackTrace(); + } + } + } catch (RuntimeException ex) { + if (ex instanceof CosmosClientException) { + 
CosmosClientException cosmosClientException = (CosmosClientException) ex; + + if (cosmosClientException.getStatusCode() != 404) { + throw ex; + } + } else { + throw ex; + } + } + + CosmosContainerProperties containerSettings = new CosmosContainerProperties(leaseCollectionName, "/id"); + CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); + + leaseContainerResponse = databaseLink.createContainer(containerSettings, 400,requestOptions).block(); + + if (leaseContainerResponse == null) { + throw new RuntimeException(String.format("Failed to create collection %s in database %s.", leaseCollectionName, databaseName)); + } + + return leaseContainerResponse.getContainer(); + } + + public static void createNewDocumentsJSON(CosmosAsyncContainer containerClient, int count, Duration delay) { + System.out.println("Creating documents\n"); + String suffix = RandomStringUtils.randomAlphabetic(10); + for (int i = 0; i <= count; i++) { + + String jsonString = "{\"id\" : \"" + String.format("0%d-%s", i, suffix) + "\"" + + "," + + "\"brand\" : \"" + ((char)(65+i)) + "\"" + + "," + + "\"type\" : \"" + ((char)(69+i)) + "\"" + + "," + + "\"expiryDate\" : \"" + "2020-03-" + StringUtils.leftPad(String.valueOf(5+i), 2, "0") + "\"" + + "}"; + + ObjectMapper mapper = new ObjectMapper(); + JsonNode document = null; + + try { + document = mapper.readTree(jsonString); + } catch (Exception e) { + e.printStackTrace(); + } + + containerClient.createItem(document).subscribe(doc -> { + System.out.println(".\n"); + }); + + long remainingWork = delay.toMillis(); + try { + while (remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException iex) { + // exception caught + break; + } + } + } + + public static boolean ensureWorkIsDone(Duration delay) { + long remainingWork = delay.toMillis(); + try { + while (!isWorkCompleted && remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException iex) { + 
return false; + } + + return remainingWork > 0; + } + +} From c210555e4b0566d72879aa5a9f355ecdb56cc253 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 09:14:27 -0700 Subject: [PATCH 012/110] Updated pom.xml --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 2c82b79..57eb621 100644 --- a/pom.xml +++ b/pom.xml @@ -77,7 +77,7 @@ com.azure azure-cosmos - 4.0.1-beta.1 + 4.0.0-preview.2 \ No newline at end of file From c40ea1ca5d1288ada7756ef8ea7c313615795f6a Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 09:19:50 -0700 Subject: [PATCH 013/110] Deleted .idea baggage --- .idea/.gitignore | 2 - .idea/.name | 1 - .idea/azure-cosmos-java-sql-api-samples.iml | 52 --------------------- .idea/codeStyles/Project.xml | 19 -------- .idea/codeStyles/codeStyleConfig.xml | 5 -- .idea/compiler.xml | 21 --------- .idea/encodings.xml | 7 --- .idea/misc.xml | 14 ------ .idea/modules.xml | 8 ---- .idea/vcs.xml | 6 --- 10 files changed, 135 deletions(-) delete mode 100644 .idea/.gitignore delete mode 100644 .idea/.name delete mode 100644 .idea/azure-cosmos-java-sql-api-samples.iml delete mode 100644 .idea/codeStyles/Project.xml delete mode 100644 .idea/codeStyles/codeStyleConfig.xml delete mode 100644 .idea/compiler.xml delete mode 100644 .idea/encodings.xml delete mode 100644 .idea/misc.xml delete mode 100644 .idea/modules.xml delete mode 100644 .idea/vcs.xml diff --git a/.idea/.gitignore b/.idea/.gitignore deleted file mode 100644 index e7e9d11..0000000 --- a/.idea/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Default ignored files -/workspace.xml diff --git a/.idea/.name b/.idea/.name deleted file mode 100644 index c486999..0000000 --- a/.idea/.name +++ /dev/null @@ -1 +0,0 @@ -Unknown \ No newline at end of file diff --git a/.idea/azure-cosmos-java-sql-api-samples.iml b/.idea/azure-cosmos-java-sql-api-samples.iml deleted file mode 100644 index 5b80b1e..0000000 --- 
a/.idea/azure-cosmos-java-sql-api-samples.iml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/codeStyles/Project.xml b/.idea/codeStyles/Project.xml deleted file mode 100644 index 0940777..0000000 --- a/.idea/codeStyles/Project.xml +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - - - - \ No newline at end of file diff --git a/.idea/codeStyles/codeStyleConfig.xml b/.idea/codeStyles/codeStyleConfig.xml deleted file mode 100644 index a55e7a1..0000000 --- a/.idea/codeStyles/codeStyleConfig.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - - \ No newline at end of file diff --git a/.idea/compiler.xml b/.idea/compiler.xml deleted file mode 100644 index c4306b9..0000000 --- a/.idea/compiler.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/encodings.xml b/.idea/encodings.xml deleted file mode 100644 index aa00ffa..0000000 --- a/.idea/encodings.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml deleted file mode 100644 index 01b1034..0000000 --- a/.idea/misc.xml +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - - \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml deleted file mode 100644 index bf36d5d..0000000 --- a/.idea/modules.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml deleted file mode 100644 index 94a25f7..0000000 --- a/.idea/vcs.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file From 9f73ffad33bd3b8238de142f9c01b358c8612365 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 09:21:51 -0700 Subject: [PATCH 014/110] Updated README.md header --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 4614906..35c7d26 100644 
--- a/README.md +++ b/README.md @@ -1,11 +1,11 @@ --- page_type: sample languages: -- csharp +- java products: -- dotnet -description: "Add 150 character max description" -urlFragment: "update-this-to-unique-url-stub" +- java sdk +description: "Sample code repo for Azure Cosmos DB Java SDK for SQL API" +urlFragment: "" --- # Azure Cosmos DB Java SQL API Samples From 601648eedbb1cd4dbd0f51f573240ed3e87db531 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 09:47:33 -0700 Subject: [PATCH 015/110] README.md update --- README.md | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 35c7d26..1ef21d3 100644 --- a/README.md +++ b/README.md @@ -18,36 +18,44 @@ Guidance on onboarding samples to docs.microsoft.com/samples: https://review.doc Taxonomies for products and languages: https://review.docs.microsoft.com/new-hope/information-architecture/metadata/taxonomies?branch=master --> -Give a short description for your sample here. What does it do and why is it important? +Sample code repo for Azure Cosmos DB Java SDK for SQL API. By cloning and running these samples, and then studying their implementation, you can have an example for sending various requests to Azure Cosmos DB from Java SDK via the SQL API. ## Contents -Outline the file contents of the repository. It helps users navigate the codebase, build configuration and any related assets. - | File/folder | Description | |-------------------|--------------------------------------------| -| `src` | Sample source code. | +| `src` | Java sample source code. Many samples have 'sync' and sync' variants | | `.gitignore` | Define what to ignore at commit time. | | `CHANGELOG.md` | List of changes to the sample. | | `CONTRIBUTING.md` | Guidelines for contributing to the sample. | | `README.md` | This README file. | | `LICENSE` | The license for the sample. 
| +| `pom.xml` | Maven Project Object Model File ## Prerequisites -Outline the required components and tools that a user might need to have on their machine in order to run the sample. This can be anything from frameworks, SDKs, OS versions or IDE releases. +* A Java IDE such as IntelliJ IDEA or VSCode +* Maven +* Setting up an Azure Cosmos DB account through the Azure Portal. The **Create a database account** section of [this guide](https://docs.microsoft.com/en-us/azure/cosmos-db/create-sql-api-java) walks you through account creation. +* The hostname and master key for your Azure Cosmos DB account ## Setup -Explain how to prepare the sample once the user clones or downloads the repository. The section should outline every step necessary to install dependencies and set up any settings (for example, API keys and output folders). +Clone the sample to your PC. Using your Java IDE, open pom.xml as a Maven project. ## Running the sample +Once you have opened the project, go to the **Run/Debug Configurations** drop-down and choose **Edit Configurations**. In the **Edit Configurations** dialog, click **+** (**Add New Configuration**) and give the new configuration a name. +In **Environment variables** paste **ACCOUNT_HOST=** *your account hostname***;ACCOUNT_KEY=** *your account key* + Outline step-by-step instructions to execute the sample and see its output. Include steps for executing the sample from the IDE, starting specific services in the Azure portal or anything related to the overall launch of the code. ## Key concepts -Provide users with more context on the tools and services used in the sample. Explain some of the code that is being used and how services interact with each other. 
+These samples cover a range of Azure Cosmos DB usage topics from more to less basic: +* Basic management of databases, containers and items +* Indexing, stored procedures, and Change Feed +* An end-to-end application sample ## Contributing From c961f4e79ca9b29402cf2108055999260ac7ddad Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 09:47:46 -0700 Subject: [PATCH 016/110] README.md update2 --- README.md | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 1ef21d3..5a57440 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ Guidance on onboarding samples to docs.microsoft.com/samples: https://review.doc Taxonomies for products and languages: https://review.docs.microsoft.com/new-hope/information-architecture/metadata/taxonomies?branch=master --> -Sample code repo for Azure Cosmos DB Java SDK for SQL API. By cloning and running these samples, and then studying their implementation, you can have an example for sending various requests to Azure Cosmos DB from Java SDK via the SQL API. +Sample code repo for Azure Cosmos DB Java SDK for SQL API. By cloning and running these samples, and then studying their implementation, you will have an example for sending various requests to Azure Cosmos DB from Java SDK via the SQL API. ## Contents @@ -46,16 +46,23 @@ Clone the sample to your PC. Using your Java IDE, open pom.xml as a Maven projec ## Running the sample Once you have opened the project, go to the **Run/Debug Configurations** drop-down and choose **Edit Configurations**. In the **Edit Configurations** dialog, click **+** (**Add New Configuration**) and give the new configuration a name. -In **Environment variables** paste **ACCOUNT_HOST=** *your account hostname***;ACCOUNT_KEY=** *your account key* +In **Environment variables** paste **ACCOUNT_HOST=** *your account hostname***;ACCOUNT_KEY=** *your account master key* which gives the sample read/write access to your account. 
-Outline step-by-step instructions to execute the sample and see its output. Include steps for executing the sample from the IDE, starting specific services in the Azure portal or anything related to the overall launch of the code. +To choose which sample will run, populate the **Main class field** with **com.azure.cosmos.examples.changefeed.***sample* where *sample* can be +* SampleCRUDQuickstart +* SampleCRUDQuickstartAsync +* SampleIndexManagement +* SampleIndexManagementAsync +* SampleStoredProcedure +* SampleStoredProcedureAsync +* Sample ChangeFeedProcessor ## Key concepts These samples cover a range of Azure Cosmos DB usage topics from more to less basic: * Basic management of databases, containers and items * Indexing, stored procedures, and Change Feed -* An end-to-end application sample +* An end-to-end application sample (*coming soon*) ## Contributing From c4caeae22a4093a7d630aeaf03b033fc1eb9f206 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 09:49:12 -0700 Subject: [PATCH 017/110] README.md update3 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5a57440..635e336 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ Sample code repo for Azure Cosmos DB Java SDK for SQL API. By cloning and runnin | File/folder | Description | |-------------------|--------------------------------------------| -| `src` | Java sample source code. Many samples have 'sync' and sync' variants | +| `src` | Java sample source code. Many samples have 'sync' and 'async' variants | | `.gitignore` | Define what to ignore at commit time. | | `CHANGELOG.md` | List of changes to the sample. | | `CONTRIBUTING.md` | Guidelines for contributing to the sample. 
| From 6ea09a0b1b3415687116624abfd124c0dda7f2be Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 09:50:15 -0700 Subject: [PATCH 018/110] README.md update3 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 635e336..ee0508a 100644 --- a/README.md +++ b/README.md @@ -45,7 +45,7 @@ Clone the sample to your PC. Using your Java IDE, open pom.xml as a Maven projec ## Running the sample -Once you have opened the project, go to the **Run/Debug Configurations** drop-down and choose **Edit Configurations**. In the **Edit Configurations** dialog, click **+** (**Add New Configuration**) and give the new configuration a name. +If you are using Intellij IDEA: Once you have opened the project, go to the **Run/Debug Configurations** drop-down and choose **Edit Configurations**. In the **Edit Configurations** dialog, click **+** (**Add New Configuration**) and give the new configuration a name. In **Environment variables** paste **ACCOUNT_HOST=** *your account hostname***;ACCOUNT_KEY=** *your account master key* which gives the sample read/write access to your account. To choose which sample will run, populate the **Main class field** with **com.azure.cosmos.examples.changefeed.***sample* where *sample* can be From 202321781250d4783c808c7ce0381ae32f569b1c Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 09:51:55 -0700 Subject: [PATCH 019/110] README.md update3 --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ee0508a..b27457a 100644 --- a/README.md +++ b/README.md @@ -46,9 +46,9 @@ Clone the sample to your PC. Using your Java IDE, open pom.xml as a Maven projec ## Running the sample If you are using Intellij IDEA: Once you have opened the project, go to the **Run/Debug Configurations** drop-down and choose **Edit Configurations**. 
In the **Edit Configurations** dialog, click **+** (**Add New Configuration**) and give the new configuration a name. -In **Environment variables** paste **ACCOUNT_HOST=** *your account hostname***;ACCOUNT_KEY=** *your account master key* which gives the sample read/write access to your account. +In **Environment variables** paste **ACCOUNT_HOST=** *your account hostname* **;ACCOUNT_KEY=** *your account master key* which gives the sample read/write access to your account. -To choose which sample will run, populate the **Main class field** with **com.azure.cosmos.examples.changefeed.***sample* where *sample* can be +To choose which sample will run, populate the **Main class field** with **com.azure.cosmos.examples.changefeed.** *sample* where *sample* can be * SampleCRUDQuickstart * SampleCRUDQuickstartAsync * SampleIndexManagement From e0ef69f7479770ddcc085d950e729f8e7a08aa01 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 09:54:09 -0700 Subject: [PATCH 020/110] README.md update3 --- README.md | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b27457a..fcfed5c 100644 --- a/README.md +++ b/README.md @@ -46,9 +46,21 @@ Clone the sample to your PC. Using your Java IDE, open pom.xml as a Maven projec ## Running the sample If you are using Intellij IDEA: Once you have opened the project, go to the **Run/Debug Configurations** drop-down and choose **Edit Configurations**. In the **Edit Configurations** dialog, click **+** (**Add New Configuration**) and give the new configuration a name. -In **Environment variables** paste **ACCOUNT_HOST=** *your account hostname* **;ACCOUNT_KEY=** *your account master key* which gives the sample read/write access to your account. 
+In **Environment variables** paste -To choose which sample will run, populate the **Main class field** with **com.azure.cosmos.examples.changefeed.** *sample* where *sample* can be +''' +ACCOUNT_HOST= your account hostname;ACCOUNT_KEY=your account master key +''' + +which gives the sample read/write access to your account. + +To choose which sample will run, populate the **Main class** field with + +''' +com.azure.cosmos.examples.changefeed.sample +''' + +where *sample* can be * SampleCRUDQuickstart * SampleCRUDQuickstartAsync * SampleIndexManagement From 8fa2f883bc46d5b795a5e81e9d83951fa3d5f01c Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 09:55:20 -0700 Subject: [PATCH 021/110] README.md update4 --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index fcfed5c..2278760 100644 --- a/README.md +++ b/README.md @@ -48,17 +48,17 @@ Clone the sample to your PC. Using your Java IDE, open pom.xml as a Maven projec If you are using Intellij IDEA: Once you have opened the project, go to the **Run/Debug Configurations** drop-down and choose **Edit Configurations**. In the **Edit Configurations** dialog, click **+** (**Add New Configuration**) and give the new configuration a name. In **Environment variables** paste -''' +``` ACCOUNT_HOST= your account hostname;ACCOUNT_KEY=your account master key -''' +``` which gives the sample read/write access to your account. 
To choose which sample will run, populate the **Main class** field with -''' +``` com.azure.cosmos.examples.changefeed.sample -''' +``` where *sample* can be * SampleCRUDQuickstart From a86fb5b067511555288059fdf952541d5e8f72b6 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 11:15:27 -0700 Subject: [PATCH 022/110] README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 2278760..d984a85 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,7 @@ Sample code repo for Azure Cosmos DB Java SDK for SQL API. By cloning and runnin * A Java IDE such as IntelliJ IDEA or VSCode * Maven +* Java SE JRE 8 * Setting up an Azure Cosmos DB account through the Azure Portal. The **Create a database account** section of [this guide](https://docs.microsoft.com/en-us/azure/cosmos-db/create-sql-api-java) walks you through account creation. * The hostname and master key for your Azure Cosmos DB account From d85a9ea342956f8da20597e532f3a5e5886e54ac Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 11:40:13 -0700 Subject: [PATCH 023/110] README updatre --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d984a85..9eaa709 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,7 @@ where *sample* can be * SampleIndexManagementAsync * SampleStoredProcedure * SampleStoredProcedureAsync -* Sample ChangeFeedProcessor +* SampleChangeFeedProcessor ## Key concepts From 3e8c2a83bf6011ae14ded85f22b9be0fb659635e Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 13:12:20 -0700 Subject: [PATCH 024/110] Clearer Change Feed example, shows POJO/JSON usage --- README.md | 4 +- .../changefeed/SampleChangeFeedProcessor.java | 69 ++++++++----------- 2 files changed, 29 insertions(+), 44 deletions(-) diff --git a/README.md b/README.md index 9eaa709..78f309c 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ Guidance on onboarding samples to 
docs.microsoft.com/samples: https://review.doc Taxonomies for products and languages: https://review.docs.microsoft.com/new-hope/information-architecture/metadata/taxonomies?branch=master --> -Sample code repo for Azure Cosmos DB Java SDK for SQL API. By cloning and running these samples, and then studying their implementation, you will have an example for sending various requests to Azure Cosmos DB from Java SDK via the SQL API. +Sample code repo for Azure Cosmos DB Java SDK for SQL API. By cloning and running these samples, and then studying their implementations, you will have an example for sending various requests to Azure Cosmos DB from Java SDK via the SQL API. ## Contents @@ -50,7 +50,7 @@ If you are using Intellij IDEA: Once you have opened the project, go to the **Ru In **Environment variables** paste ``` -ACCOUNT_HOST= your account hostname;ACCOUNT_KEY=your account master key +ACCOUNT_HOST=your account hostname;ACCOUNT_KEY=your account master key ``` which gives the sample read/write access to your account. diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java index c99b4bb..7e69caf 100644 --- a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java @@ -46,7 +46,16 @@ public static void main (String[]args) { logger.info("BEGIN Sample"); try { - + //This sample models an application where documents are being inserted into one container (the "feed container"), + //and meanwhile another worker thread or worker application is pulling inserted documents from the feed container's Change Feed + //and operating on them in some way. For one or more workers to process the Change Feed, the workers must first contact the server + //and "lease" access to monitor one or more partitions of the feed container. 
The Change Feed Processor Library + //handles leasing automatically for you, however you must create a separate "lease container" where the Change Feed + //Processor Library can store and track leases. + + //Summary of the next four commands: + //-Create an asynchronous Azure Cosmos DB client and database so that we can issue async requests to the DB + //-Create a "feed container" and a "lease container" in the DB System.out.println("-->CREATE DocumentClient"); CosmosAsyncClient client = getCosmosClient(); @@ -59,25 +68,28 @@ public static void main (String[]args) { System.out.println("-->CREATE container for lease: " + COLLECTION_NAME + "-leases"); CosmosAsyncContainer leaseContainer = createNewLeaseCollection(client, DATABASE_NAME, COLLECTION_NAME + "-leases"); + //Now, create and start the Change Feed Processor. See the implementation of getChangeFeedProcessor() for guidance + //on creating a handler for Change Feed events. In this stream, we also trigger the insertion of 10 documents on a separate + //thread. changeFeedProcessorInstance = getChangeFeedProcessor("SampleHost_1", feedContainer, leaseContainer); changeFeedProcessorInstance.start() .subscribeOn(Schedulers.elastic()) .doOnSuccess(aVoid -> { //Insert 10 documents into the feed container - //createNewDocumentsCustomPOJO demonstrates how to insert a custom POJO into a Cosmos DB container as an item - //createNewDocumentsJSON demonstrates how to insert a JSON object into a Cosmos DB container as an item - createNewDocumentsCustomPOJO(feedContainer, 5, Duration.ofSeconds(3)); - createNewDocumentsJSON(feedContainer, 5, Duration.ofSeconds(3)); + createNewDocumentsCustomPOJO(feedContainer, 10, Duration.ofSeconds(3)); isWorkCompleted = true; }) .subscribe(); + //Model of a worker thread or application which leases access to monitor one or more feed container + //partitions via the Change Feed. 
In a real-world application you might deploy this code in an Azure function long remainingWork = WAIT_FOR_WORK; while (!isWorkCompleted && remainingWork > 0) { Thread.sleep(100); remainingWork -= 100; } + //When all documents have been processed, clean up if (isWorkCompleted) { if (changeFeedProcessorInstance != null) { changeFeedProcessorInstance.stop().subscribe(); @@ -108,8 +120,18 @@ public static ChangeFeedProcessor getChangeFeedProcessor(String hostName, Cosmos for (JsonNode document : docs) { try { + //Change Feed hands the document to you in the form of a JsonNode + //As a developer you have two options for handling the JsonNode document provided to you by Change Feed + //One option is to operate on the document in the form of a JsonNode, as shown below. This is great + //especially if you do not have a single uniform data model for all documents. System.out.println("---->DOCUMENT RECEIVED: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() .writeValueAsString(document)); + + //You can also transform the JsonNode to a POJO having the same structure as the JsonNode, + //as shown below. 
+ CustomPOJO pojo_doc = OBJECT_MAPPER.treeToValue(document, CustomPOJO.class); + System.out.println("----=>id: " + pojo_doc.getId()); + } catch (JsonProcessingException e) { e.printStackTrace(); } @@ -264,43 +286,6 @@ public static void createNewDocumentsCustomPOJO(CosmosAsyncContainer containerCl } } - public static void createNewDocumentsJSON(CosmosAsyncContainer containerClient, int count, Duration delay) { - String suffix = RandomStringUtils.randomAlphabetic(10); - for (int i = 0; i <= count; i++) { - - String jsonString = "{\"id\" : \"" + String.format("0%d-%s", i, suffix) + "\"}"; - - ObjectMapper mapper = new ObjectMapper(); - JsonNode document = null; - - try { - document = mapper.readTree(jsonString); - } catch (Exception e) { - e.printStackTrace(); - } - - containerClient.createItem(document).subscribe(doc -> { - try { - System.out.println("---->DOCUMENT WRITE: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() - .writeValueAsString(doc)); - } catch (JsonProcessingException e) { - System.err.println(String.format("Failure in processing json %s", e.getMessage())); - } - }); - - long remainingWork = delay.toMillis(); - try { - while (remainingWork > 0) { - Thread.sleep(100); - remainingWork -= 100; - } - } catch (InterruptedException iex) { - // exception caught - break; - } - } - } - public static boolean ensureWorkIsDone(Duration delay) { long remainingWork = delay.toMillis(); try { From 4042f4d612f931f7afece17f70509374cadd7ee1 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 13:54:10 -0700 Subject: [PATCH 025/110] Updated README.md --- README.md | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 78f309c..829a2ee 100644 --- a/README.md +++ b/README.md @@ -46,8 +46,9 @@ Clone the sample to your PC. 
Using your Java IDE, open pom.xml as a Maven projec ## Running the sample -If you are using Intellij IDEA: Once you have opened the project, go to the **Run/Debug Configurations** drop-down and choose **Edit Configurations**. In the **Edit Configurations** dialog, click **+** (**Add New Configuration**) and give the new configuration a name. -In **Environment variables** paste +*If you are using Intellij IDEA as your Java IDE:* Once you have opened the project, go to the **Run/Debug Configurations** drop-down and choose **Edit Configurations**. +In the **Edit Configurations** dialog, click the **+** (**Add New Configuration**) button, select **Application** as the configuration type, + and give the new configuration a name. Once you are looking at the settings for your new Configuration, find **Environment variables** and paste ``` ACCOUNT_HOST=your account hostname;ACCOUNT_KEY=your account master key @@ -55,7 +56,7 @@ ACCOUNT_HOST=your account hostname;ACCOUNT_KEY=your account master key which gives the sample read/write access to your account. -To choose which sample will run, populate the **Main class** field with +To choose which sample will run, populate the **Main class** field of the Configuration with ``` com.azure.cosmos.examples.changefeed.sample @@ -70,12 +71,20 @@ where *sample* can be * SampleStoredProcedureAsync * SampleChangeFeedProcessor +*Build and execute from command line without an IDE:* From top-level directory of repo: +``` +mvn clean package +mvn exec:java -Dexec.mainClass="com.azure.cosmos.examples.changefeed.sample" -DACCOUNT_HOST=your account hostname -DACCOUNT_KEY=your account master key +``` + +where *sample*, *your account hostname*, and *your account master key* are to be filled in as above. This will rebuild and run the selected sample. 
+ ## Key concepts These samples cover a range of Azure Cosmos DB usage topics from more to less basic: * Basic management of databases, containers and items -* Indexing, stored procedures, and Change Feed -* An end-to-end application sample (*coming soon*) +* Indexing, stored procedures +* Change Feed ## Contributing From 92d5f2c37ca3a86b3eb12cb5b0d06834ab27fdbf Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 13:57:00 -0700 Subject: [PATCH 026/110] Deleted worked app example from master --- .../workedappexample/SampleGroceryStore.java | 323 ------------------ 1 file changed, 323 deletions(-) delete mode 100644 src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java diff --git a/src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java b/src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java deleted file mode 100644 index 21b5512..0000000 --- a/src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
-package com.azure.cosmos.examples.workedappexample; - -import com.azure.cosmos.ChangeFeedProcessor; -import com.azure.cosmos.ConnectionPolicy; -import com.azure.cosmos.ConsistencyLevel; -import com.azure.cosmos.CosmosAsyncContainer; -import com.azure.cosmos.CosmosAsyncContainerResponse; -import com.azure.cosmos.CosmosAsyncDatabase; -import com.azure.cosmos.CosmosClientBuilder; -import com.azure.cosmos.CosmosClientException; -import com.azure.cosmos.CosmosContainerProperties; -import com.azure.cosmos.CosmosContainerRequestOptions; -import com.azure.cosmos.CosmosAsyncClient; -import com.azure.cosmos.examples.common.AccountSettings; -import com.azure.cosmos.implementation.Utils; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.ObjectNode; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import reactor.core.scheduler.Schedulers; - -import java.text.DateFormat; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.time.Duration; -import java.util.Date; -import java.util.List; -import java.util.concurrent.TimeUnit; - -/** - * Sample for Change Feed Processor. 
- * - */ -public class SampleGroceryStore { - - public static int WAIT_FOR_WORK = 60000; - public static final String DATABASE_NAME = "db_" + RandomStringUtils.randomAlphabetic(7); - public static final String COLLECTION_NAME = "coll_" + RandomStringUtils.randomAlphabetic(7); - private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper(); - protected static Logger logger = LoggerFactory.getLogger(SampleGroceryStore.class.getSimpleName()); - - - private static ChangeFeedProcessor changeFeedProcessorInstance; - private static boolean isWorkCompleted = false; - - private static CosmosAsyncContainer typeContainer; - private static CosmosAsyncContainer expiryDateContainer; - - public static void main (String[]args) { - logger.info("BEGIN Sample"); - - try { - - System.out.println("-->CREATE DocumentClient"); - CosmosAsyncClient client = getCosmosClient(); - - System.out.println("-->CREATE Contoso Grocery Store database: " + DATABASE_NAME); - CosmosAsyncDatabase cosmosDatabase = createNewDatabase(client, DATABASE_NAME); - - System.out.println("-->CREATE container for store inventory: " + COLLECTION_NAME); - CosmosAsyncContainer feedContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME, "/id"); - - System.out.println("-->CREATE container for lease: " + COLLECTION_NAME + "-leases"); - CosmosAsyncContainer leaseContainer = createNewLeaseCollection(client, DATABASE_NAME, COLLECTION_NAME + "-leases"); - - System.out.println("-->CREATE container for materialized view partitioned by 'type': " + COLLECTION_NAME + "-leases"); - typeContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME + "-pktype", "/type"); - - System.out.println("-->CREATE container for materialized view with aggregation rule based on days until expiration " + COLLECTION_NAME + "-leases"); - expiryDateContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME + "-pkexpiryDate", "/expiryDaysRemaining"); - - changeFeedProcessorInstance = 
getChangeFeedProcessor("SampleHost_1", feedContainer, leaseContainer); - changeFeedProcessorInstance.start() - .subscribeOn(Schedulers.elastic()) - .doOnSuccess(aVoid -> { - //Insert 10 documents into the feed container - //createNewDocumentsJSON demonstrates how to insert a JSON object into a Cosmos DB container as an item - createNewDocumentsJSON(feedContainer, 10, Duration.ofSeconds(3)); - isWorkCompleted = true; - }) - .subscribe(); - - long remainingWork = WAIT_FOR_WORK; - while (!isWorkCompleted && remainingWork > 0) { - Thread.sleep(100); - remainingWork -= 100; - } - - if (isWorkCompleted) { - if (changeFeedProcessorInstance != null) { - changeFeedProcessorInstance.stop().subscribe(); - } - } else { - throw new RuntimeException("The change feed processor initialization and automatic create document feeding process did not complete in the expected time"); - } - - System.out.println("-->DELETE sample's database: " + DATABASE_NAME); - deleteDatabase(cosmosDatabase); - - Thread.sleep(500); - - } catch (Exception e) { - e.printStackTrace(); - } - - System.out.println("END Sample"); - } - - public static ChangeFeedProcessor getChangeFeedProcessor(String hostName, CosmosAsyncContainer feedContainer, CosmosAsyncContainer leaseContainer) { - return ChangeFeedProcessor.changeFeedProcessorBuilder() - .setHostName(hostName) - .setFeedContainer(feedContainer) - .setLeaseContainer(leaseContainer) - .setHandleChanges((List docs) -> { - //System.out.println("--->setHandleChanges() START"); - - for (JsonNode document : docs) { - /* System.out.println("---->DOCUMENT RECEIVED: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() - .writeValueAsString(document)); */ - //Each document update from the feed container branches out to all three user services - updateInventoryAlertService(document); - updateInventoryTypeMaterializedView(document); - updateInventoryExpiryDateAggregationPolicyMaterializedView(document); - - //Forward document => - } - 
//System.out.println("--->handleChanges() END"); - - }) - .build(); - } - - private static void updateInventoryAlertService(JsonNode document) { - System.out.println("Alert: Added new item of type " + document.get("type") + "\n"); - } - - private static void updateInventoryTypeMaterializedView(JsonNode document) { - typeContainer.createItem(document).subscribe(); - } - - private static void updateInventoryExpiryDateAggregationPolicyMaterializedView(JsonNode document) { - ObjectMapper mapper = new ObjectMapper(); - JsonNode transformed_document = null; - - //Deep-copy the input document - try { - transformed_document = document.deepCopy(); - } catch (Exception e) { - e.printStackTrace(); - } - - DateFormat formatter = new SimpleDateFormat("yyyy-MM-dd"); - - try { - long days_passed = TimeUnit.MILLISECONDS.toDays - ( - ((Date) formatter.parse("2020-03-30")).getTime() - ((Date) formatter.parse(document.get("expiryDate").textValue())).getTime() - ); - - ((ObjectNode)transformed_document).remove("expiryDate"); - ((ObjectNode) transformed_document).put("expiryDaysRemaining", String.format("%d", days_passed)); - - expiryDateContainer.createItem(transformed_document).subscribe(); - } catch (ParseException e) { - e.printStackTrace(); - } - typeContainer.createItem(document).subscribe(); - } - - public static CosmosAsyncClient getCosmosClient() { - - return new CosmosClientBuilder() - .setEndpoint(AccountSettings.HOST) - .setKey(AccountSettings.MASTER_KEY) - .setConnectionPolicy(ConnectionPolicy.getDefaultPolicy()) - .setConsistencyLevel(ConsistencyLevel.EVENTUAL) - .buildAsyncClient(); - } - - public static CosmosAsyncDatabase createNewDatabase(CosmosAsyncClient client, String databaseName) { - return client.createDatabaseIfNotExists(databaseName).block().getDatabase(); - } - - public static void deleteDatabase(CosmosAsyncDatabase cosmosDatabase) { - cosmosDatabase.delete().block(); - } - - public static CosmosAsyncContainer createNewCollection(CosmosAsyncClient client, 
String databaseName, String collectionName, String partitionKey) { - CosmosAsyncDatabase databaseLink = client.getDatabase(databaseName); - CosmosAsyncContainer collectionLink = databaseLink.getContainer(collectionName); - CosmosAsyncContainerResponse containerResponse = null; - - try { - containerResponse = collectionLink.read().block(); - - if (containerResponse != null) { - throw new IllegalArgumentException(String.format("Collection %s already exists in database %s.", collectionName, databaseName)); - } - } catch (RuntimeException ex) { - if (ex instanceof CosmosClientException) { - CosmosClientException cosmosClientException = (CosmosClientException) ex; - - if (cosmosClientException.getStatusCode() != 404) { - throw ex; - } - } else { - throw ex; - } - } - - CosmosContainerProperties containerSettings = new CosmosContainerProperties(collectionName, partitionKey); - CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); - containerResponse = databaseLink.createContainer(containerSettings, 10000, requestOptions).block(); - - if (containerResponse == null) { - throw new RuntimeException(String.format("Failed to create collection %s in database %s.", collectionName, databaseName)); - } - - return containerResponse.getContainer(); - } - - public static CosmosAsyncContainer createNewLeaseCollection(CosmosAsyncClient client, String databaseName, String leaseCollectionName) { - CosmosAsyncDatabase databaseLink = client.getDatabase(databaseName); - CosmosAsyncContainer leaseCollectionLink = databaseLink.getContainer(leaseCollectionName); - CosmosAsyncContainerResponse leaseContainerResponse = null; - - try { - leaseContainerResponse = leaseCollectionLink.read().block(); - - if (leaseContainerResponse != null) { - leaseCollectionLink.delete().block(); - - try { - Thread.sleep(1000); - } catch (InterruptedException ex) { - ex.printStackTrace(); - } - } - } catch (RuntimeException ex) { - if (ex instanceof CosmosClientException) { - 
CosmosClientException cosmosClientException = (CosmosClientException) ex; - - if (cosmosClientException.getStatusCode() != 404) { - throw ex; - } - } else { - throw ex; - } - } - - CosmosContainerProperties containerSettings = new CosmosContainerProperties(leaseCollectionName, "/id"); - CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); - - leaseContainerResponse = databaseLink.createContainer(containerSettings, 400,requestOptions).block(); - - if (leaseContainerResponse == null) { - throw new RuntimeException(String.format("Failed to create collection %s in database %s.", leaseCollectionName, databaseName)); - } - - return leaseContainerResponse.getContainer(); - } - - public static void createNewDocumentsJSON(CosmosAsyncContainer containerClient, int count, Duration delay) { - System.out.println("Creating documents\n"); - String suffix = RandomStringUtils.randomAlphabetic(10); - for (int i = 0; i <= count; i++) { - - String jsonString = "{\"id\" : \"" + String.format("0%d-%s", i, suffix) + "\"" - + "," - + "\"brand\" : \"" + ((char)(65+i)) + "\"" - + "," - + "\"type\" : \"" + ((char)(69+i)) + "\"" - + "," - + "\"expiryDate\" : \"" + "2020-03-" + StringUtils.leftPad(String.valueOf(5+i), 2, "0") + "\"" - + "}"; - - ObjectMapper mapper = new ObjectMapper(); - JsonNode document = null; - - try { - document = mapper.readTree(jsonString); - } catch (Exception e) { - e.printStackTrace(); - } - - containerClient.createItem(document).subscribe(doc -> { - System.out.println(".\n"); - }); - - long remainingWork = delay.toMillis(); - try { - while (remainingWork > 0) { - Thread.sleep(100); - remainingWork -= 100; - } - } catch (InterruptedException iex) { - // exception caught - break; - } - } - } - - public static boolean ensureWorkIsDone(Duration delay) { - long remainingWork = delay.toMillis(); - try { - while (!isWorkCompleted && remainingWork > 0) { - Thread.sleep(100); - remainingWork -= 100; - } - } catch (InterruptedException iex) { - 
return false; - } - - return remainingWork > 0; - } - -} From 65f6e14935b43ad4aef86849c8de8edfd57eeda8 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 15:20:01 -0700 Subject: [PATCH 027/110] Improved stored procedure example --- .../changefeed/SampleChangeFeedProcessor.java | 2 +- .../{changefeed => common}/CustomPOJO.java | 7 +- .../sync/SampleStoredProcedure.java | 69 +++++++++++++------ 3 files changed, 52 insertions(+), 26 deletions(-) rename src/main/java/com/azure/cosmos/examples/{changefeed => common}/CustomPOJO.java (66%) diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java index 7e69caf..c15409f 100644 --- a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java @@ -12,8 +12,8 @@ import com.azure.cosmos.CosmosClientException; import com.azure.cosmos.CosmosContainerProperties; import com.azure.cosmos.CosmosContainerRequestOptions; -import com.azure.cosmos.implementation.CosmosItemProperties; import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.examples.common.CustomPOJO; import com.azure.cosmos.implementation.Utils; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/CustomPOJO.java b/src/main/java/com/azure/cosmos/examples/common/CustomPOJO.java similarity index 66% rename from src/main/java/com/azure/cosmos/examples/changefeed/CustomPOJO.java rename to src/main/java/com/azure/cosmos/examples/common/CustomPOJO.java index 9562196..7704081 100644 --- a/src/main/java/com/azure/cosmos/examples/changefeed/CustomPOJO.java +++ b/src/main/java/com/azure/cosmos/examples/common/CustomPOJO.java @@ -1,6 +1,4 @@ -package com.azure.cosmos.examples.changefeed; - -import 
com.azure.cosmos.JsonSerializable; +package com.azure.cosmos.examples.common; public class CustomPOJO { private String id; @@ -9,6 +7,9 @@ public CustomPOJO() { } + public CustomPOJO(String id) { + this.id=id; + } public String getId() { return id; diff --git a/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java index 193504f..fe96c54 100644 --- a/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java @@ -6,6 +6,9 @@ import com.azure.cosmos.*; import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.CustomPOJO; +import com.azure.cosmos.implementation.Utils; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -17,8 +20,8 @@ public class SampleStoredProcedure { private CosmosClient client; - private final String databaseName = "AzureSampleFamilyDB"; - private final String containerName = "FamilyContainer"; + private final String databaseName = "SprocTestDB"; + private final String containerName = "SprocTestContainer"; private CosmosDatabase database; private CosmosContainer container; @@ -42,12 +45,12 @@ public static void main(String[] args) { try { p.sprocDemo(); - logger.info("Demo complete, please hold while resources are released"); + System.out.println("Demo complete, please hold while resources are released"); p.shutdown(); - logger.info("Done.\n"); + System.out.println("Done.\n"); } catch (Exception e) { e.printStackTrace(); - logger.error(String.format("Cosmos getStarted failed with %s", e)); + System.err.println(String.format("Cosmos getStarted failed with %s", e)); p.close(); } finally { } @@ -56,17 
+59,26 @@ public static void main(String[] args) { // private void sprocDemo() throws Exception { - //Setup client, DB + //Setup client, DB, and the container for which we will create stored procedures + //The container partition key will be id setUp(); - //Create, list and execute stored procedure + //Create stored procedure and list all stored procedures that have been created. createStoredProcedure(); readAllSprocs(); + + //Execute the stored procedure, which we expect will create an item with id test_doc executeStoredProcedure(); + + //Perform a point-read to confirm that the item with id test_doc exists + System.out.println("Checking that a document was created by the stored procedure..."); + CosmosItemResponse test_resp = container.readItem("test_doc",new PartitionKey("test_doc"),CustomPOJO.class); + System.out.println(String.format( + "Result of point-read for document created by stored procedure (200 indicates success): %d",test_resp.getStatusCode())); } public void setUp() throws Exception{ - logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); // Setting the preferred location to Cosmos DB Account region @@ -82,12 +94,12 @@ public void setUp() throws Exception{ .setConsistencyLevel(ConsistencyLevel.EVENTUAL) .buildClient(); - logger.info("Create database " + databaseName + " with container " + containerName + " if either does not already exist.\n"); + System.out.println("Create database " + databaseName + " with container " + containerName + " if either does not already exist.\n"); database = client.createDatabaseIfNotExists(databaseName).getDatabase(); CosmosContainerProperties containerProperties = - new CosmosContainerProperties(containerName, "/lastName"); + new CosmosContainerProperties(containerName, "/id"); container = database.createContainerIfNotExists(containerProperties, 
400).getContainer(); } @@ -97,16 +109,28 @@ public void shutdown() throws Exception { } public void createStoredProcedure() throws Exception { - logger.info("Creating stored procedure...\n"); - - sprocId = UUID.randomUUID().toString(); - CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId,"function() {var x = 11;}"); + System.out.println("Creating stored procedure..."); + + sprocId = "createMyDocument"; + String sprocBody = "function createMyDocument() {\n" + + "var documentToCreate = {\"id\":\"test_doc\"}\n" + + "var context = getContext();\n" + + "var collection = context.getCollection();\n" + + "var accepted = collection.createDocument(collection.getSelfLink(), documentToCreate,\n" + + " function (err, documentCreated) {\n" + + "if (err) throw new Error('Error' + err.message);\n" + + "context.getResponse().setBody(documentCreated.id)\n" + + "});\n" + + "if (!accepted) return;\n" + + "}"; + CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId,sprocBody); container.getScripts() .createStoredProcedure(storedProcedureDef, new CosmosStoredProcedureRequestOptions()); } private void readAllSprocs() throws Exception { + System.out.println("Listing all stored procedures associated with container " + containerName + "\n"); FeedOptions feedOptions = new FeedOptions(); CosmosContinuablePagedIterable feedResponseIterable = @@ -116,34 +140,35 @@ private void readAllSprocs() throws Exception { while(feedResponseIterator.hasNext()) { CosmosStoredProcedureProperties storedProcedureProperties = feedResponseIterator.next(); - logger.info(String.format("Stored Procedure: %s\n",storedProcedureProperties)); + System.out.println(String.format("Stored Procedure: %s",storedProcedureProperties)); } - logger.info("\n"); } public void executeStoredProcedure() throws Exception { - logger.info(String.format("Executing stored procedure %s...\n\n",sprocId)); + System.out.println(String.format("Executing 
stored procedure %s...\n\n",sprocId)); CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions(); - options.setPartitionKey(PartitionKey.NONE); + options.setPartitionKey(new PartitionKey("test_doc")); CosmosStoredProcedureResponse executeResponse = container.getScripts() .getStoredProcedure(sprocId) .execute(null, options); - logger.info(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", + System.out.println(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", sprocId, executeResponse.responseAsString(), executeResponse.getStatusCode(), - //executeResponse.getRequestLatency().toString(), executeResponse.getRequestCharge())); } public void deleteStoredProcedure() throws Exception { - logger.info("-Deleting stored procedure...\n"); + System.out.println("-Deleting stored procedure...\n"); container.getScripts() .getStoredProcedure(sprocId) .delete(); - logger.info("-Closing client instance...\n"); + System.out.println("-Deleting database...\n"); + database.delete(); + System.out.println("-Closing client instance...\n"); client.close(); + System.out.println("Done."); } } From 348efd96e71ed95c3b147bd2e563874b579e85f0 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 15:37:45 -0700 Subject: [PATCH 028/110] Updated async stored procedures example --- .../async/SampleStoredProcedureAsync.java | 77 ++++++++++++------- src/main/resources/log4j2.properties | 10 +-- 2 files changed, 55 insertions(+), 32 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java index 78932e8..2ac24fb 100644 --- a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java @@ -11,6 +11,7 @@ import 
com.azure.cosmos.*; import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.CustomPOJO; import com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -24,8 +25,8 @@ public class SampleStoredProcedureAsync { private CosmosAsyncClient client; - private final String databaseName = "AzureSampleFamilyDB"; - private final String containerName = "FamilyContainer"; + private final String databaseName = "SprocTestDB"; + private final String containerName = "SprocTestContainer"; private CosmosAsyncDatabase database; private CosmosAsyncContainer container; @@ -49,12 +50,12 @@ public static void main(String[] args) { try { p.sprocDemo(); - logger.info("Demo complete, please hold while resources are released"); + System.out.println("Demo complete, please hold while resources are released"); p.shutdown(); - logger.info("Done.\n"); + System.out.println("Done.\n"); } catch (Exception e) { e.printStackTrace(); - logger.error(String.format("Cosmos getStarted failed with %s", e)); + System.out.println(String.format("Cosmos getStarted failed with %s", e)); p.close(); } finally { } @@ -63,17 +64,27 @@ public static void main(String[] args) { // private void sprocDemo() throws Exception { - //Setup client, DB - setUp(); - - //Create, list and execute stored procedure - createStoredProcedure(); - readAllSprocs(); - executeStoredProcedure(); + //Setup client, DB, and the container for which we will create stored procedures + //The container partition key will be id + setUp(); + + //Create stored procedure and list all stored procedures that have been created. 
+ createStoredProcedure(); + readAllSprocs(); + + //Execute the stored procedure, which we expect will create an item with id test_doc + executeStoredProcedure(); + + //Perform a point-read to confirm that the item with id test_doc exists + System.out.println("Checking that a document was created by the stored procedure..."); + CosmosAsyncItemResponse test_resp = + container.readItem("test_doc",new PartitionKey("test_doc"),CustomPOJO.class).block(); + System.out.println(String.format( + "Status return value of point-read for document created by stored procedure (200 indicates success): %d",test_resp.getStatusCode())); } public void setUp() throws Exception{ - logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); // Setting the preferred location to Cosmos DB Account region @@ -89,7 +100,7 @@ public void setUp() throws Exception{ .setConsistencyLevel(ConsistencyLevel.EVENTUAL) .buildAsyncClient(); - logger.info("Create database " + databaseName + " with container " + containerName + " if either does not already exist.\n"); + System.out.println("Create database " + databaseName + " with container " + containerName + " if either does not already exist.\n"); client.createDatabaseIfNotExists(databaseName).flatMap(databaseResponse -> { database = databaseResponse.getDatabase(); @@ -97,7 +108,7 @@ public void setUp() throws Exception{ }).block(); CosmosContainerProperties containerProperties = - new CosmosContainerProperties(containerName, "/lastName"); + new CosmosContainerProperties(containerName, "/id"); database.createContainerIfNotExists(containerProperties, 400).flatMap(containerResponse -> { container = containerResponse.getContainer(); return Mono.empty(); @@ -110,10 +121,21 @@ public void shutdown() throws Exception { } public void createStoredProcedure() throws Exception { - logger.info("Creating 
stored procedure...\n"); - - sprocId = UUID.randomUUID().toString(); - CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId,"function() {var x = 11;}"); + System.out.println("Creating stored procedure...\n"); + + sprocId = "createMyDocument"; + String sprocBody = "function createMyDocument() {\n" + + "var documentToCreate = {\"id\":\"test_doc\"}\n" + + "var context = getContext();\n" + + "var collection = context.getCollection();\n" + + "var accepted = collection.createDocument(collection.getSelfLink(), documentToCreate,\n" + + " function (err, documentCreated) {\n" + + "if (err) throw new Error('Error' + err.message);\n" + + "context.getResponse().setBody(documentCreated.id)\n" + + "});\n" + + "if (!accepted) return;\n" + + "}"; + CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId,sprocBody); container.getScripts() .createStoredProcedure(storedProcedureDef, new CosmosStoredProcedureRequestOptions()).block(); @@ -130,7 +152,7 @@ private void readAllSprocs() throws Exception { fluxResponse.flatMap(storedProcedureProperties -> { - logger.info(String.format("Stored Procedure: %s\n",storedProcedureProperties.getId())); + System.out.println(String.format("Stored Procedure: %s\n",storedProcedureProperties.getId())); return Mono.empty(); }).subscribe( s -> {}, @@ -139,7 +161,7 @@ private void readAllSprocs() throws Exception { //Client-specific errors CosmosClientException cerr = (CosmosClientException)err; cerr.printStackTrace(); - logger.error(String.format("Read Item failed with %s\n", cerr)); + System.out.println(String.format("Read Item failed with %s\n", cerr)); } else { //General errors err.printStackTrace(); @@ -154,31 +176,32 @@ private void readAllSprocs() throws Exception { } public void executeStoredProcedure() throws Exception { - logger.info(String.format("Executing stored procedure %s...\n\n",sprocId)); + System.out.println(String.format("Executing stored procedure 
%s...\n\n",sprocId)); CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions(); - options.setPartitionKey(PartitionKey.NONE); + options.setPartitionKey(new PartitionKey("test_doc")); container.getScripts() .getStoredProcedure(sprocId) .execute(null, options) .flatMap(executeResponse -> { - logger.info(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", + System.out.println(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", sprocId, executeResponse.getResponseAsString(), executeResponse.getStatusCode(), - //executeResponse.getRequestLatency().toString(), executeResponse.getRequestCharge())); return Mono.empty(); }).block(); } public void deleteStoredProcedure() throws Exception { - logger.info("-Deleting stored procedure...\n"); + System.out.println("-Deleting stored procedure...\n"); container.getScripts() .getStoredProcedure(sprocId) .delete().block(); - logger.info("-Closing client instance...\n"); + System.out.println("-Deleting database...\n"); + database.delete().block(); + System.out.println("-Closing client instance...\n"); client.close(); } } diff --git a/src/main/resources/log4j2.properties b/src/main/resources/log4j2.properties index dd96ec9..43f940f 100644 --- a/src/main/resources/log4j2.properties +++ b/src/main/resources/log4j2.properties @@ -1,17 +1,17 @@ # this is the log4j configuration for tests # Set root logger level to WARN and its appender to STDOUT. -rootLogger.level = warn +rootLogger.level = INFO rootLogger.appenderRef.stdout.ref = STDOUT logger.netty.name = io.netty -logger.netty.level = info +logger.netty.level = INFO logger.reactor.name = io.reactivex -logger.reactor.level = info +logger.reactor.level = INFO -logger.cosmos.name = com.azure.data.cosmos.rx.examples.multimaster -logger.cosmos.level = info +logger.cosmos.name = com.azure.cosmos +logger.cosmos.level = INFO # STDOUT is a ConsoleAppender and uses PatternLayout. 
appender.console.name = STDOUT From 7817463ce218d2354c773ee8c18d8ec1197697fd Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 16:04:26 -0700 Subject: [PATCH 029/110] Improved indexing example. --- .../async/SampleIndexManagementAsync.java | 50 +++++++++++-------- .../sync/SampleIndexManagement.java | 43 +++++++++------- 2 files changed, 52 insertions(+), 41 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java index 0148c8c..305d4c5 100644 --- a/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java @@ -47,22 +47,26 @@ public static void main(String[] args) { SampleIndexManagementAsync p = new SampleIndexManagementAsync(); try { - logger.info("Starting ASYNC main"); - p.getStartedDemo(); - logger.info("Demo complete, please hold while resources are released"); + System.out.println("Starting ASYNC main"); + p.indexManagementDemo(); + System.out.println("Demo complete, please hold while resources are released"); } catch (Exception e) { e.printStackTrace(); - logger.error(String.format("Cosmos getStarted failed with %s", e)); + System.err.println(String.format("Cosmos getStarted failed with %s", e)); } finally { - logger.info("Closing the client"); + System.out.println("Closing the client"); p.close(); } } // - private void getStartedDemo() throws Exception { - logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + private void indexManagementDemo() throws Exception { + //This sample is similar to SampleCRUDQuickstartAsync, but modified to show indexing capabilities of Cosmos DB. + //Look at the implementation of createContainerIfNotExistsWithSpecifiedIndex() for the demonstration of + //indexing capabilities. 
+ + System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); // Setting the preferred location to Cosmos DB Account region @@ -81,6 +85,8 @@ private void getStartedDemo() throws Exception { // createDatabaseIfNotExists(); + + //Here is where index management is performed createContainerIfNotExistsWithSpecifiedIndex(); Family andersenFamilyItem=Families.getAndersenFamilyItem(); @@ -101,29 +107,29 @@ private void getStartedDemo() throws Exception { johnsonFamilyItem, smithFamilyItem); - logger.info("Reading items."); + System.out.println("Reading items."); readItems(familiesToCreate); - logger.info("Querying items."); + System.out.println("Querying items."); queryItems(); } private void createDatabaseIfNotExists() throws Exception { - logger.info("Create database " + databaseName + " if not exists."); + System.out.println("Create database " + databaseName + " if not exists."); // Create database if not exists // Mono databaseIfNotExists = client.createDatabaseIfNotExists(databaseName); databaseIfNotExists.flatMap(databaseResponse -> { database = databaseResponse.getDatabase(); - logger.info("Checking database " + database.getId() + " completed!\n"); + System.out.println("Checking database " + database.getId() + " completed!\n"); return Mono.empty(); }).block(); // } private void createContainerIfNotExistsWithSpecifiedIndex() throws Exception { - logger.info("Create container " + containerName + " if not exists."); + System.out.println("Create container " + containerName + " if not exists."); // Create container if not exists // @@ -191,7 +197,7 @@ private void createContainerIfNotExistsWithSpecifiedIndex() throws Exception { // Create container with 400 RU/s containerIfNotExists.flatMap(containerResponse -> { container = containerResponse.getContainer(); - logger.info("Checking container " + container.getId() + " completed!\n"); + System.out.println("Checking container " + 
container.getId() + " completed!\n"); return Mono.empty(); }).block(); @@ -209,17 +215,17 @@ private void createFamilies(Flux families) throws Exception { return container.createItem(family); }) //Flux of item request responses .flatMap(itemResponse -> { - logger.info(String.format("Created item with request charge of %.2f within" + + System.out.println(String.format("Created item with request charge of %.2f within" + " duration %s", itemResponse.getRequestCharge(), itemResponse.getRequestLatency())); - logger.info(String.format("Item ID: %s\n", itemResponse.getResource().getId())); + System.out.println(String.format("Item ID: %s\n", itemResponse.getResource().getId())); return Mono.just(itemResponse.getRequestCharge()); }) //Flux of request charges .reduce(0.0, (charge_n,charge_nplus1) -> charge_n + charge_nplus1 ) //Mono of total charge - there will be only one item in this stream .subscribe(charge -> { - logger.info(String.format("Created items with total request charge of %.2f\n", + System.out.println(String.format("Created items with total request charge of %.2f\n", charge)); }, err -> { @@ -227,7 +233,7 @@ private void createFamilies(Flux families) throws Exception { //Client-specific errors CosmosClientException cerr = (CosmosClientException)err; cerr.printStackTrace(); - logger.info(String.format("Read Item failed with %s\n", cerr)); + System.out.println(String.format("Read Item failed with %s\n", cerr)); } else { //General errors err.printStackTrace(); @@ -262,7 +268,7 @@ private void readItems(Flux familiesToCreate) { itemResponse -> { double requestCharge = itemResponse.getRequestCharge(); Duration requestLatency = itemResponse.getRequestLatency(); - logger.info(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", + System.out.println(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", itemResponse.getResource().getId(), requestCharge, requestLatency)); }, err -> { @@ 
-270,7 +276,7 @@ private void readItems(Flux familiesToCreate) { //Client-specific errors CosmosClientException cerr = (CosmosClientException)err; cerr.printStackTrace(); - logger.info(String.format("Read Item failed with %s\n", cerr)); + System.out.println(String.format("Read Item failed with %s\n", cerr)); } else { //General errors err.printStackTrace(); @@ -306,11 +312,11 @@ private void queryItems() { pagedFluxResponse.byPage().subscribe( fluxResponse -> { - logger.info("Got a page of query result with " + + System.out.println("Got a page of query result with " + fluxResponse.getResults().size() + " items(s)" + " and request charge of " + fluxResponse.getRequestCharge()); - logger.info("Item Ids " + fluxResponse + System.out.println("Item Ids " + fluxResponse .getResults() .stream() .map(Family::getId) @@ -321,7 +327,7 @@ private void queryItems() { //Client-specific errors CosmosClientException cerr = (CosmosClientException)err; cerr.printStackTrace(); - logger.error(String.format("Read Item failed with %s\n", cerr)); + System.err.println(String.format("Read Item failed with %s\n", cerr)); } else { //General errors err.printStackTrace(); diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java index 1a1b1cb..6b27514 100644 --- a/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java @@ -41,25 +41,30 @@ public void close() { */ //
public static void main(String[] args) { + SampleIndexManagement p = new SampleIndexManagement(); try { - logger.info("Starting SYNC main"); - p.getStartedDemo(); - logger.info("Demo complete, please hold while resources are released"); + System.out.println("Starting SYNC main"); + p.indexManagementDemo(); + System.out.println("Demo complete, please hold while resources are released"); } catch (Exception e) { e.printStackTrace(); - logger.error(String.format("Cosmos getStarted failed with %s", e)); + System.err.println(String.format("Cosmos getStarted failed with %s", e)); } finally { - logger.info("Closing the client"); + System.out.println("Closing the client"); p.close(); } } //
- private void getStartedDemo() throws Exception { - logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + private void indexManagementDemo() throws Exception { + //This sample is similar to SampleCRUDQuickstart, but modified to show indexing capabilities of Cosmos DB. + //Look at the implementation of createContainerIfNotExistsWithSpecifiedIndex() for the demonstration of + //indexing capabilities. + + System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); // Setting the preferred location to Cosmos DB Account region @@ -78,9 +83,9 @@ private void getStartedDemo() throws Exception { //
createDatabaseIfNotExists(); - createContainerIfNotExistsWithSpecifiedIndex(); - + //Here is where index management is performed + createContainerIfNotExistsWithSpecifiedIndex(); // Setup family items to create ArrayList familiesToCreate = new ArrayList<>(); @@ -91,26 +96,26 @@ private void getStartedDemo() throws Exception { createFamilies(familiesToCreate); - logger.info("Reading items."); + System.out.println("Reading items."); readItems(familiesToCreate); - logger.info("Querying items."); + System.out.println("Querying items."); queryItems(); } private void createDatabaseIfNotExists() throws Exception { - logger.info("Create database " + databaseName + " if not exists."); + System.out.println("Create database " + databaseName + " if not exists."); // Create database if not exists // database = client.createDatabaseIfNotExists(databaseName).getDatabase(); // - logger.info("Checking database " + database.getId() + " completed!\n"); + System.out.println("Checking database " + database.getId() + " completed!\n"); } private void createContainerIfNotExistsWithSpecifiedIndex() throws Exception { - logger.info("Create container " + containerName + " if not exists."); + System.out.println("Create container " + containerName + " if not exists."); // Create container if not exists CosmosContainerProperties containerProperties = @@ -175,7 +180,7 @@ private void createContainerIfNotExistsWithSpecifiedIndex() throws Exception { // Create container with 400 RU/s container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); - logger.info("Checking container " + container.getId() + " completed!\n"); + System.out.println("Checking container " + container.getId() + " completed!\n"); } private void createFamilies(List families) throws Exception { @@ -192,12 +197,12 @@ private void createFamilies(List families) throws Exception { // // Get request charge and other properties like latency, and diagnostics strings, etc. 
- logger.info(String.format("Created item with request charge of %.2f within" + + System.out.println(String.format("Created item with request charge of %.2f within" + " duration %s", item.getRequestCharge(), item.getRequestLatency())); totalRequestCharge += item.getRequestCharge(); } - logger.info(String.format("Created %d items with total request " + + System.out.println(String.format("Created %d items with total request " + "charge of %.2f", families.size(), totalRequestCharge)); @@ -234,11 +239,11 @@ private void queryItems() { "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); familiesPagedIterable.iterableByPage().forEach(cosmosItemPropertiesFeedResponse -> { - logger.info("Got a page of query result with " + + System.out.println("Got a page of query result with " + cosmosItemPropertiesFeedResponse.getResults().size() + " items(s)" + " and request charge of " + cosmosItemPropertiesFeedResponse.getRequestCharge()); - logger.info("Item Ids " + cosmosItemPropertiesFeedResponse + System.out.println("Item Ids " + cosmosItemPropertiesFeedResponse .getResults() .stream() .map(Family::getId) From e7373f7fed7247e7e9dda1b4ad2c3aa52afd6673 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 16:18:19 -0700 Subject: [PATCH 030/110] Converted logger calls into println calls --- .../async/SampleCRUDQuickstartAsync.java | 40 +++++++-------- .../sync/SampleCRUDQuickstart.java | 51 +++++++++++++------ 2 files changed, 55 insertions(+), 36 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java index bb1637e..377e3a7 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java @@ -46,15 +46,15 @@ public 
static void main(String[] args) { SampleCRUDQuickstartAsync p = new SampleCRUDQuickstartAsync(); try { - logger.info("Starting ASYNC main"); + System.out.println("Starting ASYNC main"); System.out.println("got here.\n"); p.getStartedDemo(); - logger.info("Demo complete, please hold while resources are released"); + System.out.println("Demo complete, please hold while resources are released"); } catch (Exception e) { e.printStackTrace(); - logger.error(String.format("Cosmos getStarted failed with %s", e)); + System.err.println(String.format("Cosmos getStarted failed with %s", e)); } finally { - logger.info("Closing the client"); + System.out.println("Closing the client"); p.close(); } } @@ -62,7 +62,7 @@ public static void main(String[] args) { // private void getStartedDemo() throws Exception { - logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); // Setting the preferred location to Cosmos DB Account region @@ -101,29 +101,29 @@ private void getStartedDemo() throws Exception { johnsonFamilyItem, smithFamilyItem); - logger.info("Reading items."); + System.out.println("Reading items."); readItems(familiesToCreate); - logger.info("Querying items."); + System.out.println("Querying items."); queryItems(); } private void createDatabaseIfNotExists() throws Exception { - logger.info("Create database " + databaseName + " if not exists."); + System.out.println("Create database " + databaseName + " if not exists."); // Create database if not exists // Mono databaseIfNotExists = client.createDatabaseIfNotExists(databaseName); databaseIfNotExists.flatMap(databaseResponse -> { database = databaseResponse.getDatabase(); - logger.info("Checking database " + database.getId() + " completed!\n"); + System.out.println("Checking database " + database.getId() + " completed!\n"); return Mono.empty(); }).block(); // } 
private void createContainerIfNotExists() throws Exception { - logger.info("Create container " + containerName + " if not exists."); + System.out.println("Create container " + containerName + " if not exists."); // Create container if not exists // @@ -134,7 +134,7 @@ private void createContainerIfNotExists() throws Exception { // Create container with 400 RU/s containerIfNotExists.flatMap(containerResponse -> { container = containerResponse.getContainer(); - logger.info("Checking container " + container.getId() + " completed!\n"); + System.out.println("Checking container " + container.getId() + " completed!\n"); return Mono.empty(); }).block(); @@ -152,17 +152,17 @@ private void createFamilies(Flux families) throws Exception { return container.createItem(family); }) //Flux of item request responses .flatMap(itemResponse -> { - logger.info(String.format("Created item with request charge of %.2f within" + + System.out.println(String.format("Created item with request charge of %.2f within" + " duration %s", itemResponse.getRequestCharge(), itemResponse.getRequestLatency())); - logger.info(String.format("Item ID: %s\n", itemResponse.getResource().getId())); + System.out.println(String.format("Item ID: %s\n", itemResponse.getResource().getId())); return Mono.just(itemResponse.getRequestCharge()); }) //Flux of request charges .reduce(0.0, (charge_n,charge_nplus1) -> charge_n + charge_nplus1 ) //Mono of total charge - there will be only one item in this stream .subscribe(charge -> { - logger.info(String.format("Created items with total request charge of %.2f\n", + System.out.println(String.format("Created items with total request charge of %.2f\n", charge)); }, err -> { @@ -170,7 +170,7 @@ private void createFamilies(Flux families) throws Exception { //Client-specific errors CosmosClientException cerr = (CosmosClientException)err; cerr.printStackTrace(); - logger.error(String.format("Read Item failed with %s\n", cerr)); + System.err.println(String.format("Read Item 
failed with %s\n", cerr)); } else { //General errors err.printStackTrace(); @@ -205,7 +205,7 @@ private void readItems(Flux familiesToCreate) { itemResponse -> { double requestCharge = itemResponse.getRequestCharge(); Duration requestLatency = itemResponse.getRequestLatency(); - logger.info(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", + System.out.println(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", itemResponse.getResource().getId(), requestCharge, requestLatency)); }, err -> { @@ -213,7 +213,7 @@ private void readItems(Flux familiesToCreate) { //Client-specific errors CosmosClientException cerr = (CosmosClientException)err; cerr.printStackTrace(); - logger.error(String.format("Read Item failed with %s\n", cerr)); + System.err.println(String.format("Read Item failed with %s\n", cerr)); } else { //General errors err.printStackTrace(); @@ -250,11 +250,11 @@ private void queryItems() { pagedFluxResponse.byPage().subscribe( fluxResponse -> { - logger.info("Got a page of query result with " + + System.out.println("Got a page of query result with " + fluxResponse.getResults().size() + " items(s)" + " and request charge of " + fluxResponse.getRequestCharge()); - logger.info("Item Ids " + fluxResponse + System.out.println("Item Ids " + fluxResponse .getResults() .stream() .map(Family::getId) @@ -265,7 +265,7 @@ private void queryItems() { //Client-specific errors CosmosClientException cerr = (CosmosClientException)err; cerr.printStackTrace(); - logger.error(String.format("Read Item failed with %s\n", cerr)); + System.err.println(String.format("Read Item failed with %s\n", cerr)); } else { //General errors err.printStackTrace(); diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java index 7fa2f0c..b37e828 100644 --- 
a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java @@ -56,22 +56,22 @@ public static void main(String[] args) { SampleCRUDQuickstart p = new SampleCRUDQuickstart(); try { - logger.info("Starting SYNC main"); + System.out.println("Starting SYNC main"); p.getStartedDemo(); - logger.info("Demo complete, please hold while resources are released"); + System.out.println("Demo complete, please hold while resources are released"); } catch (Exception e) { e.printStackTrace(); - logger.error(String.format("Cosmos getStarted failed with %s", e)); + System.err.println(String.format("Cosmos getStarted failed with %s", e)); } finally { - logger.info("Closing the client"); - p.close(); + System.out.println("Closing the client"); + p.shutdown(); } } // private void getStartedDemo() throws Exception { - logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); // Setting the preferred location to Cosmos DB Account region @@ -101,26 +101,26 @@ private void getStartedDemo() throws Exception { createFamilies(familiesToCreate); - logger.info("Reading items."); + System.out.println("Reading items."); readItems(familiesToCreate); - logger.info("Querying items."); + System.out.println("Querying items."); queryItems(); } private void createDatabaseIfNotExists() throws Exception { - logger.info("Create database " + databaseName + " if not exists."); + System.out.println("Create database " + databaseName + " if not exists."); // Create database if not exists // database = client.createDatabaseIfNotExists(databaseName).getDatabase(); // - logger.info("Checking database " + database.getId() + " completed!\n"); + System.out.println("Checking database " + database.getId() + " completed!\n"); } private void 
createContainerIfNotExists() throws Exception { - logger.info("Create container " + containerName + " if not exists."); + System.out.println("Create container " + containerName + " if not exists."); // Create container if not exists // @@ -131,7 +131,7 @@ private void createContainerIfNotExists() throws Exception { container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); // - logger.info("Checking container " + container.getId() + " completed!\n"); + System.out.println("Checking container " + container.getId() + " completed!\n"); } private void createFamilies(List families) throws Exception { @@ -166,11 +166,11 @@ private void readItems(ArrayList familiesToCreate) { CosmosItemResponse item = container.readItem(family.getId(), new PartitionKey(family.getLastName()), Family.class); double requestCharge = item.getRequestCharge(); Duration requestLatency = item.getRequestLatency(); - logger.info(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", + System.out.println(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", item.getResource().getId(), requestCharge, requestLatency)); } catch (CosmosClientException e) { e.printStackTrace(); - logger.error(String.format("Read Item failed with %s", e)); + System.out.println(String.format("Read Item failed with %s", e)); } // }); @@ -189,11 +189,11 @@ private void queryItems() { "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); familiesPagedIterable.iterableByPage().forEach(cosmosItemPropertiesFeedResponse -> { - logger.info("Got a page of query result with " + + System.out.println("Got a page of query result with " + cosmosItemPropertiesFeedResponse.getResults().size() + " items(s)" + " and request charge of " + cosmosItemPropertiesFeedResponse.getRequestCharge()); - logger.info("Item Ids " + cosmosItemPropertiesFeedResponse + 
System.out.println("Item Ids " + cosmosItemPropertiesFeedResponse .getResults() .stream() .map(Family::getId) @@ -201,4 +201,23 @@ private void queryItems() { }); // } + + private void shutdown() { + try { + //Clean shutdown + System.out.println("Deleting Cosmos DB resources"); + System.out.println("-Deleting container..."); + if (container != null) + container.delete(); + System.out.println("-Deleting database..."); + if (database != null) + database.delete(); + System.out.println("-Closing the client..."); + } catch (Exception err) { + System.err.println("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); + err.printStackTrace(); + } + client.close(); + System.out.println("Done."); + } } From 5be7ba9c1993de5db85cec8e2d6c6f9b6b87f8ea Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 16:35:25 -0700 Subject: [PATCH 031/110] Updated sync/async SampleCRUDQuickstart code to include item and resource delete --- .../async/SampleCRUDQuickstartAsync.java | 39 ++++++++++++++++++- .../sync/SampleCRUDQuickstart.java | 9 +++++ 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java index 377e3a7..776dff0 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java @@ -47,7 +47,6 @@ public static void main(String[] args) { try { System.out.println("Starting ASYNC main"); - System.out.println("got here.\n"); p.getStartedDemo(); System.out.println("Demo complete, please hold while resources are released"); } catch (Exception e) { @@ -55,13 +54,21 @@ public static void main(String[] args) { System.err.println(String.format("Cosmos getStarted failed with %s", e)); } finally { 
System.out.println("Closing the client"); - p.close(); + p.shutdown(); } } // private void getStartedDemo() throws Exception { + //This is a simple sample application intended to demonstrate Create, Read, Update, Delete (CRUD) operations + //with Azure Cosmos DB Java SDK, as applied to databases, containers and items. This sample will + //1. Create asynchronous client, database and container instances + //2. Create (and also update) several items + //3. Perform a query over the items + //4. Delete an item + //5. Delete the Cosmos DB database and container resources and close the client. + System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); @@ -94,6 +101,8 @@ private void getStartedDemo() throws Exception { johnsonFamilyItem, smithFamilyItem); + // Creates several items in the container + // Also applies an upsert operation to one of the items (create if not present, otherwise replace) createFamilies(familiesToCreate); familiesToCreate = Flux.just(andersenFamilyItem, @@ -106,6 +115,9 @@ private void getStartedDemo() throws Exception { System.out.println("Querying items."); queryItems(); + + System.out.println("Deleting an item."); + deleteItem(andersenFamilyItem); } private void createDatabaseIfNotExists() throws Exception { @@ -284,4 +296,27 @@ private void queryItems() { // } + + private void deleteItem(Family item) { + container.deleteItem(item.getId(),new PartitionKey(item.getLastName())).block(); + } + + private void shutdown() { + try { + //Clean shutdown + System.out.println("Deleting Cosmos DB resources"); + System.out.println("-Deleting container..."); + if (container != null) + container.delete(); + System.out.println("-Deleting database..."); + if (database != null) + database.delete(); + System.out.println("-Closing the client..."); + } catch (Exception err) { + System.err.println("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + err.printStackTrace(); + } + client.close(); + System.out.println("Done."); + } } diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java index b37e828..a94685d 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java @@ -99,6 +99,8 @@ private void getStartedDemo() throws Exception { familiesToCreate.add(Families.getJohnsonFamilyItem()); familiesToCreate.add(Families.getSmithFamilyItem()); + // Creates several items in the container + // Also applies an upsert operation to one of the items (create if not present, otherwise replace) createFamilies(familiesToCreate); System.out.println("Reading items."); @@ -106,6 +108,9 @@ private void getStartedDemo() throws Exception { System.out.println("Querying items."); queryItems(); + + System.out.println("Delete an item."); + deleteItem(familiesToCreate.get(0)); } private void createDatabaseIfNotExists() throws Exception { @@ -202,6 +207,10 @@ private void queryItems() { // } + private void deleteItem(Family item) { + container.deleteItem(item.getId(),new PartitionKey(item.getLastName()),new CosmosItemRequestOptions()); + } + private void shutdown() { try { //Clean shutdown From d49650df5129e20c22564f5f362b5ef8fe561148 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 16:56:00 -0700 Subject: [PATCH 032/110] Added upsert to each of the CRUD examples, sync/async --- .../async/SampleCRUDQuickstartAsync.java | 30 +++++++++++++++---- .../sync/SampleCRUDQuickstart.java | 19 ++++++++++++ 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java 
b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java index 776dff0..960c815 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java @@ -64,10 +64,11 @@ private void getStartedDemo() throws Exception { //This is a simple sample application intended to demonstrate Create, Read, Update, Delete (CRUD) operations //with Azure Cosmos DB Java SDK, as applied to databases, containers and items. This sample will //1. Create asynchronous client, database and container instances - //2. Create (and also update) several items - //3. Perform a query over the items - //4. Delete an item - //5. Delete the Cosmos DB database and container resources and close the client. + //2. Create several items + //3. Upsert one of the items + //4. Perform a query over the items + //5. Delete an item + //6. Delete the Cosmos DB database and container resources and close the client. 
System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); @@ -102,9 +103,11 @@ private void getStartedDemo() throws Exception { smithFamilyItem); // Creates several items in the container - // Also applies an upsert operation to one of the items (create if not present, otherwise replace) createFamilies(familiesToCreate); + // Upsert one of the items in the container + upsertFamily(wakefieldFamilyItem); + familiesToCreate = Flux.just(andersenFamilyItem, wakefieldFamilyItem, johnsonFamilyItem, @@ -202,6 +205,23 @@ private void createFamilies(Flux families) throws Exception { // } + private void upsertFamily(Family family_to_upsert) { + //Modify a field of the family object + System.out.println(String.format("Upserting the item with id %s after modifying the isRegistered field...",family_to_upsert.getId())); + family_to_upsert.setRegistered(!family_to_upsert.isRegistered()); + + //Upsert the modified item + Mono.just(family_to_upsert).flatMap(item -> { + CosmosAsyncItemResponse item_resp = container.upsertItem(family_to_upsert).block(); + + // Get upsert request charge and other properties like latency, and diagnostics strings, etc. + System.out.println(String.format("Upserted item with request charge of %.2f within duration %s", + item_resp.getRequestCharge(), item_resp.getRequestLatency())); + + return Mono.empty(); + }).subscribe(); + } + private void readItems(Flux familiesToCreate) { // Using partition key for point read scenarios. 
// This will help fast look up of items because of partition key diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java index a94685d..293cd35 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java @@ -71,6 +71,15 @@ public static void main(String[] args) { // private void getStartedDemo() throws Exception { + //This is a simple sample application intended to demonstrate Create, Read, Update, Delete (CRUD) operations + //with Azure Cosmos DB Java SDK, as applied to databases, containers and items. This sample will + //1. Create synchronous client, database and container instances + //2. Create several items + //3. Upsert one of the items + //4. Perform a query over the items + //5. Delete an item + //6. Delete the Cosmos DB database and container resources and close the client. + System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); @@ -160,6 +169,16 @@ private void createFamilies(List families) throws Exception { } System.out.println(String.format("Created %d items with total request charge of %.2f", families.size(), totalRequestCharge)); + + Family family_to_upsert = families.get(0); + System.out.println(String.format("Upserting the item with id %s after modifying the isRegistered field...",family_to_upsert.getId())); + family_to_upsert.setRegistered(!family_to_upsert.isRegistered()); + + CosmosItemResponse item = container.upsertItem(family_to_upsert); + + // Get upsert request charge and other properties like latency, and diagnostics strings, etc. 
+ System.out.println(String.format("Upserted item with request charge of %.2f within duration %s", + item.getRequestCharge(), item.getRequestLatency())); } private void readItems(ArrayList familiesToCreate) { From ba6911774bad3375848a88ef4105e07c6bc71400 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 17:05:45 -0700 Subject: [PATCH 033/110] Fixed missing subscribe() calls in async code --- .../async/SampleCRUDQuickstartAsync.java | 4 ++-- .../async/SampleIndexManagementAsync.java | 21 ++++++++++++++++++- .../sync/SampleIndexManagement.java | 21 ++++++++++++++++++- 3 files changed, 42 insertions(+), 4 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java index 960c815..3e5b581 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java @@ -327,10 +327,10 @@ private void shutdown() { System.out.println("Deleting Cosmos DB resources"); System.out.println("-Deleting container..."); if (container != null) - container.delete(); + container.delete().subscribe(); System.out.println("-Deleting database..."); if (database != null) - database.delete(); + database.delete().subscribe(); System.out.println("-Closing the client..."); } catch (Exception err) { System.err.println("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java index 305d4c5..b335434 100644 --- a/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java @@ -55,7 +55,7 @@ public static void main(String[] args) { System.err.println(String.format("Cosmos getStarted failed with %s", e)); } finally { System.out.println("Closing the client"); - p.close(); + p.shutdown(); } } @@ -346,4 +346,23 @@ private void queryItems() { // } + + private void shutdown() { + try { + //Clean shutdown + System.out.println("Deleting Cosmos DB resources"); + System.out.println("-Deleting container..."); + if (container != null) + container.delete().subscribe(); + System.out.println("-Deleting database..."); + if (database != null) + database.delete().subscribe(); + System.out.println("-Closing the client..."); + } catch (Exception err) { + System.err.println("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + err.printStackTrace(); + } + client.close(); + System.out.println("Done."); + } } diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java index 6b27514..fc07cab 100644 --- a/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java @@ -53,7 +53,7 @@ public static void main(String[] args) { System.err.println(String.format("Cosmos getStarted failed with %s", e)); } finally { System.out.println("Closing the client"); - p.close(); + p.shutdown(); } } @@ -251,4 +251,23 @@ private void queryItems() { }); // } + + private void shutdown() { + try { + //Clean shutdown + System.out.println("Deleting Cosmos DB resources"); + System.out.println("-Deleting container..."); + if (container != null) + container.delete(); + System.out.println("-Deleting database..."); + if (database != null) + database.delete(); + System.out.println("-Closing the client..."); + } catch (Exception err) { + System.err.println("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + err.printStackTrace(); + } + client.close(); + System.out.println("Done."); + } } From 1d27d37a8ce23753ec7b44e9397f8aa1bdfbae58 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 17:14:27 -0700 Subject: [PATCH 034/110] Improved Change Feed sample comments --- .../changefeed/SampleChangeFeedProcessor.java | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java index c15409f..149e113 100644 --- a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java @@ -48,10 +48,10 @@ public static void main (String[]args) { try { //This sample models an application where documents are being inserted into one container (the "feed container"), //and meanwhile another worker thread or worker application is pulling inserted documents from the feed container's Change Feed - //and operating on them in some way. For one or more workers to process the Change Feed, the workers must first contact the server + //and operating on them in some way. For one or more workers to process the Change Feed of a container, the workers must first contact the server //and "lease" access to monitor one or more partitions of the feed container. The Change Feed Processor Library //handles leasing automatically for you, however you must create a separate "lease container" where the Change Feed - //Processor Library can store and track leases. + //Processor Library can store and track leases container partitions. 
//Summary of the next four commands: //-Create an asynchronous Azure Cosmos DB client and database so that we can issue async requests to the DB @@ -68,7 +68,9 @@ public static void main (String[]args) { System.out.println("-->CREATE container for lease: " + COLLECTION_NAME + "-leases"); CosmosAsyncContainer leaseContainer = createNewLeaseCollection(client, DATABASE_NAME, COLLECTION_NAME + "-leases"); - //Now, create and start the Change Feed Processor. See the implementation of getChangeFeedProcessor() for guidance + //Model of a worker thread or application which leases access to monitor one or more feed container + //partitions via the Change Feed. In a real-world application you might deploy this code in an Azure function. + //The next line causes the worker to create and start an instance of the Change Feed Processor. See the implementation of getChangeFeedProcessor() for guidance //on creating a handler for Change Feed events. In this stream, we also trigger the insertion of 10 documents on a separate //thread. changeFeedProcessorInstance = getChangeFeedProcessor("SampleHost_1", feedContainer, leaseContainer); @@ -81,15 +83,14 @@ public static void main (String[]args) { }) .subscribe(); - //Model of a worker thread or application which leases access to monitor one or more feed container - //partitions via the Change Feed. In a real-world application you might deploy this code in an Azure function + //Worker loops while its Change Feed Processor instance asynchronously handles incoming Change Feed events from the feed container long remainingWork = WAIT_FOR_WORK; while (!isWorkCompleted && remainingWork > 0) { Thread.sleep(100); remainingWork -= 100; } - //When all documents have been processed, clean up + //In this application we have a flag isWorkCompleted indicating when all documents have been processed. 
When all documents have been processed, clean up if (isWorkCompleted) { if (changeFeedProcessorInstance != null) { changeFeedProcessorInstance.stop().subscribe(); From 7495a28a3fa5a94dd346799a66f5fbc53dc05135 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 9 Mar 2020 17:40:34 -0700 Subject: [PATCH 035/110] Improved Change Feed sample --- .../changefeed/SampleChangeFeedProcessor.java | 64 +++++-------------- 1 file changed, 15 insertions(+), 49 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java index 149e113..c83cb82 100644 --- a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java @@ -73,24 +73,33 @@ public static void main (String[]args) { //The next line causes the worker to create and start an instance of the Change Feed Processor. See the implementation of getChangeFeedProcessor() for guidance //on creating a handler for Change Feed events. In this stream, we also trigger the insertion of 10 documents on a separate //thread. 
+ System.out.println("-->START Change Feed Processor on worker (handles changes asynchronously)"); changeFeedProcessorInstance = getChangeFeedProcessor("SampleHost_1", feedContainer, leaseContainer); changeFeedProcessorInstance.start() .subscribeOn(Schedulers.elastic()) .doOnSuccess(aVoid -> { - //Insert 10 documents into the feed container - createNewDocumentsCustomPOJO(feedContainer, 10, Duration.ofSeconds(3)); - isWorkCompleted = true; + //pass }) .subscribe(); - //Worker loops while its Change Feed Processor instance asynchronously handles incoming Change Feed events from the feed container + //These two lines model an application which is inserting ten documents into the feed container + System.out.println("-->START application that inserts documents into feed container"); + createNewDocumentsCustomPOJO(feedContainer, 10, Duration.ofSeconds(3)); + isWorkCompleted = true; + + //This loop models the Worker main loop, which spins while its Change Feed Processor instance asynchronously + //handles incoming Change Feed events from the feed container. Of course in this sample, polling + //isWorkCompleted is unnecessary because items are being added to the feed container on the same thread, and you + //can see just above isWorkCompleted is set to true. + //But conceptually the worker is part of a different thread or application than the one which is inserting + //into the feed container; so this code illustrates the worker waiting and listening for changes to the feed container long remainingWork = WAIT_FOR_WORK; while (!isWorkCompleted && remainingWork > 0) { Thread.sleep(100); remainingWork -= 100; } - //In this application we have a flag isWorkCompleted indicating when all documents have been processed. 
When all documents have been processed, clean up + //When all documents have been processed, clean up if (isWorkCompleted) { if (changeFeedProcessorInstance != null) { changeFeedProcessorInstance.stop().subscribe(); @@ -129,7 +138,7 @@ public static ChangeFeedProcessor getChangeFeedProcessor(String hostName, Cosmos .writeValueAsString(document)); //You can also transform the JsonNode to a POJO having the same structure as the JsonNode, - //as shown below. + //as shown below. Then you can operate on the POJO. CustomPOJO pojo_doc = OBJECT_MAPPER.treeToValue(document, CustomPOJO.class); System.out.println("----=>id: " + pojo_doc.getId()); @@ -236,34 +245,6 @@ public static CosmosAsyncContainer createNewLeaseCollection(CosmosAsyncClient cl return leaseContainerResponse.getContainer(); } - public static void createNewDocuments(CosmosAsyncContainer containerClient, int count, Duration delay) { - String suffix = RandomStringUtils.randomAlphabetic(10); - for (int i = 0; i <= count; i++) { - CustomPOJO document = new CustomPOJO(); - document.setId(String.format("0%d-%s", i, suffix)); - - containerClient.createItem(document).subscribe(doc -> { - try { - System.out.println("---->DOCUMENT WRITE: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() - .writeValueAsString(doc)); - } catch (JsonProcessingException e) { - System.err.println(String.format("Failure in processing json %s", e.getMessage())); - } - }); - - long remainingWork = delay.toMillis(); - try { - while (remainingWork > 0) { - Thread.sleep(100); - remainingWork -= 100; - } - } catch (InterruptedException iex) { - // exception caught - break; - } - } - } - public static void createNewDocumentsCustomPOJO(CosmosAsyncContainer containerClient, int count, Duration delay) { String suffix = RandomStringUtils.randomAlphabetic(10); for (int i = 0; i <= count; i++) { @@ -286,19 +267,4 @@ public static void createNewDocumentsCustomPOJO(CosmosAsyncContainer containerCl } } } - - public static boolean ensureWorkIsDone(Duration 
delay) { - long remainingWork = delay.toMillis(); - try { - while (!isWorkCompleted && remainingWork > 0) { - Thread.sleep(100); - remainingWork -= 100; - } - } catch (InterruptedException iex) { - return false; - } - - return remainingWork > 0; - } - } From 54f0ec3b49af256215427a8cc5fc23a249519527 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 10 Mar 2020 10:18:49 -0700 Subject: [PATCH 036/110] Updated pom.xml to latest (public) release --- pom.xml | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/pom.xml b/pom.xml index 57eb621..461ff84 100644 --- a/pom.xml +++ b/pom.xml @@ -11,24 +11,7 @@ UTF-8 - - - azure-sdk-for-java - https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-java/maven/v1 - - - - - azure-sdk-for-java - https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-java/maven/v1 - - true - - - true - - - + @@ -77,7 +60,7 @@ com.azure azure-cosmos - 4.0.0-preview.2 + 4.0.1-beta.1 \ No newline at end of file From d66fa8c89871cc543ecd8ef6e61924c5d4e04dd6 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 10 Mar 2020 10:50:46 -0700 Subject: [PATCH 037/110] Fixed issues with pom.xml; updated samples for latest jar --- pom.xml | 14 ------------- .../changefeed/SampleChangeFeedProcessor.java | 6 +++--- .../async/SampleCRUDQuickstartAsync.java | 11 +++++----- .../sync/SampleCRUDQuickstart.java | 21 +++++-------------- .../async/SampleIndexManagementAsync.java | 11 +++++----- .../sync/SampleIndexManagement.java | 7 ++++--- .../async/SampleStoredProcedureAsync.java | 4 ++-- .../sync/SampleStoredProcedure.java | 6 ++---- 8 files changed, 28 insertions(+), 52 deletions(-) diff --git a/pom.xml b/pom.xml index 461ff84..5cd487f 100644 --- a/pom.xml +++ b/pom.xml @@ -27,20 +27,6 @@ org.codehaus.mojo exec-maven-plugin 1.6.0 - - - sync - - com.azure.cosmos.sample.sync.SyncMain - - - - async - - com.azure.cosmos.sample.async.AsyncMain - - - org.apache.maven.plugins diff --git 
a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java index c83cb82..f667334 100644 --- a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java @@ -6,15 +6,15 @@ import com.azure.cosmos.ConnectionPolicy; import com.azure.cosmos.ConsistencyLevel; import com.azure.cosmos.CosmosAsyncContainer; -import com.azure.cosmos.CosmosAsyncContainerResponse; import com.azure.cosmos.CosmosAsyncDatabase; import com.azure.cosmos.CosmosClientBuilder; import com.azure.cosmos.CosmosClientException; -import com.azure.cosmos.CosmosContainerProperties; -import com.azure.cosmos.CosmosContainerRequestOptions; import com.azure.cosmos.CosmosAsyncClient; import com.azure.cosmos.examples.common.CustomPOJO; import com.azure.cosmos.implementation.Utils; +import com.azure.cosmos.models.CosmosAsyncContainerResponse; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java index 3e5b581..58618a3 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java @@ -9,6 +9,7 @@ import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.Families; import com.azure.cosmos.examples.common.Family; +import com.azure.cosmos.models.*; import com.google.common.collect.Lists; import 
reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -170,7 +171,7 @@ private void createFamilies(Flux families) throws Exception { System.out.println(String.format("Created item with request charge of %.2f within" + " duration %s", itemResponse.getRequestCharge(), itemResponse.getRequestLatency())); - System.out.println(String.format("Item ID: %s\n", itemResponse.getResource().getId())); + System.out.println(String.format("Item ID: %s\n", itemResponse.getItem().getId())); return Mono.just(itemResponse.getRequestCharge()); }) //Flux of request charges .reduce(0.0, @@ -238,7 +239,7 @@ private void readItems(Flux familiesToCreate) { double requestCharge = itemResponse.getRequestCharge(); Duration requestLatency = itemResponse.getRequestLatency(); System.out.println(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", - itemResponse.getResource().getId(), requestCharge, requestLatency)); + itemResponse.getItem().getId(), requestCharge, requestLatency)); }, err -> { if (err instanceof CosmosClientException) { @@ -270,12 +271,12 @@ private void queryItems() { // Set some common query options FeedOptions queryOptions = new FeedOptions(); - queryOptions.maxItemCount(10); + queryOptions.setMaxItemCount(10); //queryOptions.setEnableCrossPartitionQuery(true); //No longer needed in SDK v4 // Set populate query metrics to get metrics around query executions - queryOptions.populateQueryMetrics(true); + queryOptions.setPopulateQueryMetrics(true); - CosmosContinuablePagedFlux pagedFluxResponse = container.queryItems( + CosmosPagedFlux pagedFluxResponse = container.queryItems( "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); final CountDownLatch completionLatch = new CountDownLatch(1); diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java 
b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java index 293cd35..9d3d827 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java @@ -3,23 +3,12 @@ package com.azure.cosmos.examples.crudquickstart.sync; -import com.azure.cosmos.ConnectionPolicy; -import com.azure.cosmos.ConsistencyLevel; -import com.azure.cosmos.CosmosClient; -import com.azure.cosmos.CosmosClientBuilder; -import com.azure.cosmos.CosmosClientException; -import com.azure.cosmos.CosmosContainer; -import com.azure.cosmos.CosmosContainerProperties; -import com.azure.cosmos.CosmosContinuablePagedIterable; -import com.azure.cosmos.CosmosDatabase; -import com.azure.cosmos.CosmosItemRequestOptions; -import com.azure.cosmos.CosmosItemResponse; -import com.azure.cosmos.FeedOptions; -import com.azure.cosmos.PartitionKey; +import com.azure.cosmos.*; import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.Families; import com.azure.cosmos.examples.common.Family; +import com.azure.cosmos.models.*; import com.google.common.collect.Lists; import java.time.Duration; @@ -204,12 +193,12 @@ private void queryItems() { // // Set some common query options FeedOptions queryOptions = new FeedOptions(); - queryOptions.maxItemCount(10); + queryOptions.setMaxItemCount(10); //queryOptions.setEnableCrossPartitionQuery(true); //No longer necessary in SDK v4 // Set populate query metrics to get metrics around query executions - queryOptions.populateQueryMetrics(true); + queryOptions.setPopulateQueryMetrics(true); - CosmosContinuablePagedIterable familiesPagedIterable = container.queryItems( + CosmosPagedIterable familiesPagedIterable = container.queryItems( "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", 
queryOptions, Family.class); familiesPagedIterable.iterableByPage().forEach(cosmosItemPropertiesFeedResponse -> { diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java index b335434..7d5e5ea 100644 --- a/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java @@ -8,6 +8,7 @@ import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.Families; import com.azure.cosmos.examples.common.Family; +import com.azure.cosmos.models.*; import com.google.common.collect.Lists; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -218,7 +219,7 @@ private void createFamilies(Flux families) throws Exception { System.out.println(String.format("Created item with request charge of %.2f within" + " duration %s", itemResponse.getRequestCharge(), itemResponse.getRequestLatency())); - System.out.println(String.format("Item ID: %s\n", itemResponse.getResource().getId())); + System.out.println(String.format("Item ID: %s\n", itemResponse.getItem().getId())); return Mono.just(itemResponse.getRequestCharge()); }) //Flux of request charges .reduce(0.0, @@ -269,7 +270,7 @@ private void readItems(Flux familiesToCreate) { double requestCharge = itemResponse.getRequestCharge(); Duration requestLatency = itemResponse.getRequestLatency(); System.out.println(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", - itemResponse.getResource().getId(), requestCharge, requestLatency)); + itemResponse.getItem().getId(), requestCharge, requestLatency)); }, err -> { if (err instanceof CosmosClientException) { @@ -301,11 +302,11 @@ private void queryItems() { // Set some common query options FeedOptions queryOptions = new 
FeedOptions(); - queryOptions.maxItemCount(10); + queryOptions.setMaxItemCount(10); // Set populate query metrics to get metrics around query executions - queryOptions.populateQueryMetrics(true); + queryOptions.setPopulateQueryMetrics(true); - CosmosContinuablePagedFlux pagedFluxResponse = container.queryItems( + CosmosPagedFlux pagedFluxResponse = container.queryItems( "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); final CountDownLatch completionLatch = new CountDownLatch(1); diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java index fc07cab..1b459df 100644 --- a/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java @@ -8,6 +8,7 @@ import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.Families; import com.azure.cosmos.examples.common.Family; +import com.azure.cosmos.models.*; import com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -231,11 +232,11 @@ private void queryItems() { // // Set some common query options FeedOptions queryOptions = new FeedOptions(); - queryOptions.maxItemCount(10); + queryOptions.setMaxItemCount(10); // Set populate query metrics to get metrics around query executions - queryOptions.populateQueryMetrics(true); + queryOptions.setPopulateQueryMetrics(true); - CosmosContinuablePagedIterable familiesPagedIterable = container.queryItems( + CosmosPagedIterable familiesPagedIterable = container.queryItems( "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); familiesPagedIterable.iterableByPage().forEach(cosmosItemPropertiesFeedResponse -> { diff --git 
a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java index 2ac24fb..eaf23bb 100644 --- a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java @@ -12,10 +12,10 @@ import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.CustomPOJO; +import com.azure.cosmos.models.*; import com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import java.util.UUID; @@ -144,7 +144,7 @@ public void createStoredProcedure() throws Exception { private void readAllSprocs() throws Exception { FeedOptions feedOptions = new FeedOptions(); - CosmosContinuablePagedFlux fluxResponse = + CosmosPagedFlux fluxResponse = container.getScripts().readAllStoredProcedures(feedOptions); final CountDownLatch completionLatch = new CountDownLatch(1); diff --git a/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java index fe96c54..4524507 100644 --- a/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java @@ -7,13 +7,11 @@ import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.CustomPOJO; -import com.azure.cosmos.implementation.Utils; -import com.fasterxml.jackson.databind.ObjectMapper; +import com.azure.cosmos.models.*; import com.google.common.collect.Lists; 
import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.UUID; import java.util.Iterator; public class SampleStoredProcedure { @@ -133,7 +131,7 @@ private void readAllSprocs() throws Exception { System.out.println("Listing all stored procedures associated with container " + containerName + "\n"); FeedOptions feedOptions = new FeedOptions(); - CosmosContinuablePagedIterable feedResponseIterable = + CosmosPagedIterable feedResponseIterable = container.getScripts().readAllStoredProcedures(feedOptions); Iterator feedResponseIterator = feedResponseIterable.iterator(); From f86de9b1ea6697abb15f528a3a7884121d4a0baa Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 10 Mar 2020 11:01:54 -0700 Subject: [PATCH 038/110] Updated README.md with current package names for executing samples via Maven command line --- README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 829a2ee..b110ea0 100644 --- a/README.md +++ b/README.md @@ -59,17 +59,17 @@ which gives the sample read/write access to your account. 
To choose which sample will run, populate the **Main class** field of the Configuration with ``` -com.azure.cosmos.examples.changefeed.sample +com.azure.cosmos.examples.sample.synchronicity.MainClass ``` -where *sample* can be -* SampleCRUDQuickstart -* SampleCRUDQuickstartAsync -* SampleIndexManagement -* SampleIndexManagementAsync -* SampleStoredProcedure -* SampleStoredProcedureAsync -* SampleChangeFeedProcessor +where *sample.synchronicity.MainClass* can be +* crudquickstart.sync.SampleCRUDQuickstart +* crudquickstart.async.SampleCRUDQuickstartAsync +* indexmanagement.sync.SampleIndexManagement +* indexmanagement.async.SampleIndexManagementAsync +* storedprocedure.sync.SampleStoredProcedure +* storedprocedure.async.SampleStoredProcedureAsync +* changefeed.SampleChangeFeedProcessor *(Changefeed has only an async sample, no sync sample.)* *Build and execute from command line without an IDE:* From top-level directory of repo: ``` From 70f951e5b7824bf0ef8205984bc53b28b090a3d8 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 10 Mar 2020 11:55:05 -0700 Subject: [PATCH 039/110] Applying Mo's suggested changes: code cleanup esp. 
imports & licensing header; applying Kushagra's fixes from PRBranch: logger.info and logger.warm used in place of System println --- pom.xml | 112 +++++--- .../changefeed/SampleChangeFeedProcessor.java | 87 +++--- .../changefeed/SampleConfigurations.java | 16 +- .../examples/common/AccountSettings.java | 4 +- .../cosmos/examples/common/CustomPOJO.java | 5 +- .../cosmos/examples/common/Families.java | 10 +- .../async/SampleCRUDQuickstartAsync.java | 265 ++++++++++-------- .../sync/SampleCRUDQuickstart.java | 106 +++---- .../async/SampleIndexManagementAsync.java | 111 +++++--- .../sync/SampleIndexManagement.java | 66 +++-- .../async/SampleStoredProcedureAsync.java | 160 ++++++----- .../sync/SampleStoredProcedure.java | 140 ++++----- src/main/resources/log4j2.properties | 27 +- 13 files changed, 605 insertions(+), 504 deletions(-) diff --git a/pom.xml b/pom.xml index 5cd487f..69ece04 100644 --- a/pom.xml +++ b/pom.xml @@ -1,52 +1,72 @@ - 4.0.0 + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + 4.0.0 - com.azure - azure-cosmos-java-sql-api-samples - 1.0-SNAPSHOT - Get Started With Sync / Async Java SDK for SQL API of Azure Cosmos DB Database Service - - UTF-8 - + com.azure + azure-cosmos-java-sql-api-samples + 1.0-SNAPSHOT + Get Started With Sync / Async Java SDK for SQL API of Azure Cosmos DB Database Service + + + UTF-8 + - - - - maven-compiler-plugin - 3.1 - - 1.8 - 1.8 - - - - org.codehaus.mojo - exec-maven-plugin - 1.6.0 - - - org.apache.maven.plugins - maven-eclipse-plugin - 2.8 - - - - org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8 - - - - - - - - - com.azure - azure-cosmos - 4.0.1-beta.1 - - + + + + maven-compiler-plugin + 3.1 + + 1.8 + 1.8 + + + + org.codehaus.mojo + exec-maven-plugin + 1.6.0 + + + org.apache.maven.plugins + maven-eclipse-plugin + 2.8 + + + + 
org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8 + + + + + + + + + com.azure + azure-cosmos + 4.0.1-beta.1 + + + org.apache.logging.log4j + log4j-slf4j-impl + 2.13.0 + test + + + + org.apache.logging.log4j + log4j-api + 2.11.1 + test + + + + org.slf4j + slf4j-jdk14 + 1.7.28 + + \ No newline at end of file diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java index f667334..5a28566 100644 --- a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java @@ -5,11 +5,11 @@ import com.azure.cosmos.ChangeFeedProcessor; import com.azure.cosmos.ConnectionPolicy; import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosAsyncClient; import com.azure.cosmos.CosmosAsyncContainer; import com.azure.cosmos.CosmosAsyncDatabase; import com.azure.cosmos.CosmosClientBuilder; import com.azure.cosmos.CosmosClientException; -import com.azure.cosmos.CosmosAsyncClient; import com.azure.cosmos.examples.common.CustomPOJO; import com.azure.cosmos.implementation.Utils; import com.azure.cosmos.models.CosmosAsyncContainerResponse; @@ -28,7 +28,6 @@ /** * Sample for Change Feed Processor. 
- * */ public class SampleChangeFeedProcessor { @@ -42,7 +41,7 @@ public class SampleChangeFeedProcessor { private static ChangeFeedProcessor changeFeedProcessorInstance; private static boolean isWorkCompleted = false; - public static void main (String[]args) { + public static void main(String[] args) { logger.info("BEGIN Sample"); try { @@ -56,16 +55,16 @@ public static void main (String[]args) { //Summary of the next four commands: //-Create an asynchronous Azure Cosmos DB client and database so that we can issue async requests to the DB //-Create a "feed container" and a "lease container" in the DB - System.out.println("-->CREATE DocumentClient"); + logger.info("-->CREATE DocumentClient"); CosmosAsyncClient client = getCosmosClient(); - System.out.println("-->CREATE sample's database: " + DATABASE_NAME); + logger.info("-->CREATE sample's database: " + DATABASE_NAME); CosmosAsyncDatabase cosmosDatabase = createNewDatabase(client, DATABASE_NAME); - System.out.println("-->CREATE container for documents: " + COLLECTION_NAME); + logger.info("-->CREATE container for documents: " + COLLECTION_NAME); CosmosAsyncContainer feedContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME); - System.out.println("-->CREATE container for lease: " + COLLECTION_NAME + "-leases"); + logger.info("-->CREATE container for lease: " + COLLECTION_NAME + "-leases"); CosmosAsyncContainer leaseContainer = createNewLeaseCollection(client, DATABASE_NAME, COLLECTION_NAME + "-leases"); //Model of a worker thread or application which leases access to monitor one or more feed container @@ -73,17 +72,17 @@ public static void main (String[]args) { //The next line causes the worker to create and start an instance of the Change Feed Processor. See the implementation of getChangeFeedProcessor() for guidance //on creating a handler for Change Feed events. In this stream, we also trigger the insertion of 10 documents on a separate //thread. 
- System.out.println("-->START Change Feed Processor on worker (handles changes asynchronously)"); + logger.info("-->START Change Feed Processor on worker (handles changes asynchronously)"); changeFeedProcessorInstance = getChangeFeedProcessor("SampleHost_1", feedContainer, leaseContainer); changeFeedProcessorInstance.start() - .subscribeOn(Schedulers.elastic()) - .doOnSuccess(aVoid -> { - //pass - }) - .subscribe(); + .subscribeOn(Schedulers.elastic()) + .doOnSuccess(aVoid -> { + //pass + }) + .subscribe(); //These two lines model an application which is inserting ten documents into the feed container - System.out.println("-->START application that inserts documents into feed container"); + logger.info("-->START application that inserts documents into feed container"); createNewDocumentsCustomPOJO(feedContainer, 10, Duration.ofSeconds(3)); isWorkCompleted = true; @@ -108,7 +107,7 @@ public static void main (String[]args) { throw new RuntimeException("The change feed processor initialization and automatic create document feeding process did not complete in the expected time"); } - System.out.println("-->DELETE sample's database: " + DATABASE_NAME); + logger.info("-->DELETE sample's database: " + DATABASE_NAME); deleteDatabase(cosmosDatabase); Thread.sleep(500); @@ -117,39 +116,39 @@ public static void main (String[]args) { e.printStackTrace(); } - System.out.println("END Sample"); + logger.info("END Sample"); } public static ChangeFeedProcessor getChangeFeedProcessor(String hostName, CosmosAsyncContainer feedContainer, CosmosAsyncContainer leaseContainer) { return ChangeFeedProcessor.changeFeedProcessorBuilder() - .setHostName(hostName) - .setFeedContainer(feedContainer) - .setLeaseContainer(leaseContainer) - .setHandleChanges((List docs) -> { - System.out.println("--->setHandleChanges() START"); - - for (JsonNode document : docs) { - try { - //Change Feed hands the document to you in the form of a JsonNode - //As a developer you have two options for handling the 
JsonNode document provided to you by Change Feed - //One option is to operate on the document in the form of a JsonNode, as shown below. This is great - //especially if you do not have a single uniform data model for all documents. - System.out.println("---->DOCUMENT RECEIVED: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() - .writeValueAsString(document)); - - //You can also transform the JsonNode to a POJO having the same structure as the JsonNode, - //as shown below. Then you can operate on the POJO. - CustomPOJO pojo_doc = OBJECT_MAPPER.treeToValue(document, CustomPOJO.class); - System.out.println("----=>id: " + pojo_doc.getId()); - - } catch (JsonProcessingException e) { - e.printStackTrace(); + .setHostName(hostName) + .setFeedContainer(feedContainer) + .setLeaseContainer(leaseContainer) + .setHandleChanges((List docs) -> { + logger.info("--->setHandleChanges() START"); + + for (JsonNode document : docs) { + try { + //Change Feed hands the document to you in the form of a JsonNode + //As a developer you have two options for handling the JsonNode document provided to you by Change Feed + //One option is to operate on the document in the form of a JsonNode, as shown below. This is great + //especially if you do not have a single uniform data model for all documents. + logger.info("---->DOCUMENT RECEIVED: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() + .writeValueAsString(document)); + + //You can also transform the JsonNode to a POJO having the same structure as the JsonNode, + //as shown below. Then you can operate on the POJO. 
+ CustomPOJO pojo_doc = OBJECT_MAPPER.treeToValue(document, CustomPOJO.class); + logger.info("----=>id: " + pojo_doc.getId()); + + } catch (JsonProcessingException e) { + e.printStackTrace(); + } } - } - System.out.println("--->handleChanges() END"); + logger.info("--->handleChanges() END"); - }) - .build(); + }) + .build(); } public static CosmosAsyncClient getCosmosClient() { @@ -236,7 +235,7 @@ public static CosmosAsyncContainer createNewLeaseCollection(CosmosAsyncClient cl CosmosContainerProperties containerSettings = new CosmosContainerProperties(leaseCollectionName, "/id"); CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); - leaseContainerResponse = databaseLink.createContainer(containerSettings, 400,requestOptions).block(); + leaseContainerResponse = databaseLink.createContainer(containerSettings, 400, requestOptions).block(); if (leaseContainerResponse == null) { throw new RuntimeException(String.format("Failed to create collection %s in database %s.", leaseCollectionName, databaseName)); @@ -252,7 +251,7 @@ public static void createNewDocumentsCustomPOJO(CosmosAsyncContainer containerCl document.setId(String.format("0%d-%s", i, suffix)); containerClient.createItem(document).subscribe(doc -> { - System.out.println("---->DOCUMENT WRITE: " + doc); + logger.info("---->DOCUMENT WRITE: " + doc); }); long remainingWork = delay.toMillis(); diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleConfigurations.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleConfigurations.java index 86e75c7..f373f7e 100644 --- a/src/main/java/com/azure/cosmos/examples/changefeed/SampleConfigurations.java +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleConfigurations.java @@ -23,14 +23,14 @@ public final class SampleConfigurations { // The default values are credentials of the local emulator, which are not used in any production environment. 
// public static String MASTER_KEY = - System.getProperty("ACCOUNT_KEY", - StringUtils.defaultString(Strings.emptyToNull( - System.getenv().get("ACCOUNT_KEY")), - "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==")); + System.getProperty("ACCOUNT_KEY", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("ACCOUNT_KEY")), + "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==")); public static String HOST = - System.getProperty("ACCOUNT_HOST", - StringUtils.defaultString(Strings.emptyToNull( - System.getenv().get("ACCOUNT_HOST")), - "https://localhost:8081/")); + System.getProperty("ACCOUNT_HOST", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("ACCOUNT_HOST")), + "https://localhost:8081/")); } diff --git a/src/main/java/com/azure/cosmos/examples/common/AccountSettings.java b/src/main/java/com/azure/cosmos/examples/common/AccountSettings.java index 12bb3ba..64a7776 100644 --- a/src/main/java/com/azure/cosmos/examples/common/AccountSettings.java +++ b/src/main/java/com/azure/cosmos/examples/common/AccountSettings.java @@ -7,7 +7,7 @@ /** * Contains the account configurations for Sample. - * + *

* For running tests, you can pass a customized endpoint configuration in one of the following * ways: *

    @@ -15,7 +15,7 @@ * command-line option. *
  • You can set ACCOUNT_KEY and ACCOUNT_HOST as environment variables.
  • *
- * + *

* If none of the above is set, emulator endpoint will be used. * Emulator http cert is self signed. If you are using emulator, * make sure emulator https certificate is imported diff --git a/src/main/java/com/azure/cosmos/examples/common/CustomPOJO.java b/src/main/java/com/azure/cosmos/examples/common/CustomPOJO.java index 7704081..0341d1a 100644 --- a/src/main/java/com/azure/cosmos/examples/common/CustomPOJO.java +++ b/src/main/java/com/azure/cosmos/examples/common/CustomPOJO.java @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + package com.azure.cosmos.examples.common; public class CustomPOJO { @@ -8,7 +11,7 @@ public CustomPOJO() { } public CustomPOJO(String id) { - this.id=id; + this.id = id; } public String getId() { diff --git a/src/main/java/com/azure/cosmos/examples/common/Families.java b/src/main/java/com/azure/cosmos/examples/common/Families.java index fd549ee..1f658ae 100644 --- a/src/main/java/com/azure/cosmos/examples/common/Families.java +++ b/src/main/java/com/azure/cosmos/examples/common/Families.java @@ -16,7 +16,7 @@ public static Family getAndersenFamilyItem() { Parent parent2 = new Parent(); parent2.setFirstName("Mary Kay"); - andersenFamily.setParents(new Parent[] { parent1, parent2 }); + andersenFamily.setParents(new Parent[]{parent1, parent2}); Child child1 = new Child(); child1.setFirstName("Henriette Thaulow"); @@ -26,7 +26,7 @@ public static Family getAndersenFamilyItem() { Pet pet1 = new Pet(); pet1.setGivenName("Fluffy"); - child1.setPets(new Pet[] { pet1 }); + child1.setPets(new Pet[]{pet1}); andersenFamily.setDistrict("WA5"); Address address = new Address(); @@ -53,7 +53,7 @@ public static Family getWakefieldFamilyItem() { parent2.setFamilyName("Miller"); parent2.setFirstName("Ben"); - wakefieldFamily.setParents(new Parent[] { parent1, parent2 }); + wakefieldFamily.setParents(new Parent[]{parent1, parent2}); Child child1 = new Child(); child1.setFirstName("Jesse"); @@ 
-66,7 +66,7 @@ public static Family getWakefieldFamilyItem() { Pet pet2 = new Pet(); pet2.setGivenName("Shadow"); - child1.setPets(new Pet[] { pet1, pet2 }); + child1.setPets(new Pet[]{pet1, pet2}); Child child2 = new Child(); child2.setFirstName("Lisa"); @@ -74,7 +74,7 @@ public static Family getWakefieldFamilyItem() { child2.setGrade(1); child2.setGender("female"); - wakefieldFamily.setChildren(new Child[] { child1, child2 }); + wakefieldFamily.setChildren(new Child[]{child1, child2}); Address address = new Address(); address.setCity("NY"); diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java index 58618a3..02c9c1e 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java @@ -4,13 +4,27 @@ package com.azure.cosmos.examples.crudquickstart.async; -import com.azure.cosmos.*; +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosPagedFlux; import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.Families; import com.azure.cosmos.examples.common.Family; -import com.azure.cosmos.models.*; +import com.azure.cosmos.models.CosmosAsyncContainerResponse; +import com.azure.cosmos.models.CosmosAsyncDatabaseResponse; +import com.azure.cosmos.models.CosmosAsyncItemResponse; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.FeedOptions; +import 
com.azure.cosmos.models.PartitionKey; import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -18,9 +32,6 @@ import java.util.concurrent.CountDownLatch; import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public class SampleCRUDQuickstartAsync { private CosmosAsyncClient client; @@ -47,14 +58,14 @@ public static void main(String[] args) { SampleCRUDQuickstartAsync p = new SampleCRUDQuickstartAsync(); try { - System.out.println("Starting ASYNC main"); + logger.info("Starting ASYNC main"); p.getStartedDemo(); - System.out.println("Demo complete, please hold while resources are released"); + logger.info("Demo complete, please hold while resources are released"); } catch (Exception e) { e.printStackTrace(); - System.err.println(String.format("Cosmos getStarted failed with %s", e)); + logger.error(String.format("Cosmos getStarted failed with %s", e)); } finally { - System.out.println("Closing the client"); + logger.info("Closing the client"); p.shutdown(); } } @@ -71,7 +82,7 @@ private void getStartedDemo() throws Exception { //5. Delete an item //6. Delete the Cosmos DB database and container resources and close the client. 
- System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); // Setting the preferred location to Cosmos DB Account region @@ -81,27 +92,27 @@ private void getStartedDemo() throws Exception { // Create async client // client = new CosmosClientBuilder() - .setEndpoint(AccountSettings.HOST) - .setKey(AccountSettings.MASTER_KEY) - .setConnectionPolicy(defaultPolicy) - .setConsistencyLevel(ConsistencyLevel.EVENTUAL) - .buildAsyncClient(); + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(defaultPolicy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildAsyncClient(); // createDatabaseIfNotExists(); createContainerIfNotExists(); - Family andersenFamilyItem=Families.getAndersenFamilyItem(); - Family wakefieldFamilyItem=Families.getWakefieldFamilyItem(); - Family johnsonFamilyItem=Families.getJohnsonFamilyItem(); - Family smithFamilyItem=Families.getSmithFamilyItem(); + Family andersenFamilyItem = Families.getAndersenFamilyItem(); + Family wakefieldFamilyItem = Families.getWakefieldFamilyItem(); + Family johnsonFamilyItem = Families.getJohnsonFamilyItem(); + Family smithFamilyItem = Families.getSmithFamilyItem(); // Setup family items to create Flux familiesToCreate = Flux.just(andersenFamilyItem, - wakefieldFamilyItem, - johnsonFamilyItem, - smithFamilyItem); + wakefieldFamilyItem, + johnsonFamilyItem, + smithFamilyItem); // Creates several items in the container createFamilies(familiesToCreate); @@ -110,36 +121,36 @@ private void getStartedDemo() throws Exception { upsertFamily(wakefieldFamilyItem); familiesToCreate = Flux.just(andersenFamilyItem, - wakefieldFamilyItem, - johnsonFamilyItem, - smithFamilyItem); + wakefieldFamilyItem, + johnsonFamilyItem, + smithFamilyItem); - System.out.println("Reading items."); + logger.info("Reading items."); 
readItems(familiesToCreate); - System.out.println("Querying items."); + logger.info("Querying items."); queryItems(); - System.out.println("Deleting an item."); + logger.info("Deleting an item."); deleteItem(andersenFamilyItem); } private void createDatabaseIfNotExists() throws Exception { - System.out.println("Create database " + databaseName + " if not exists."); + logger.info("Create database " + databaseName + " if not exists."); // Create database if not exists // Mono databaseIfNotExists = client.createDatabaseIfNotExists(databaseName); databaseIfNotExists.flatMap(databaseResponse -> { database = databaseResponse.getDatabase(); - System.out.println("Checking database " + database.getId() + " completed!\n"); + logger.info("Checking database " + database.getId() + " completed!\n"); return Mono.empty(); }).block(); // } private void createContainerIfNotExists() throws Exception { - System.out.println("Create container " + containerName + " if not exists."); + logger.info("Create container " + containerName + " if not exists."); // Create container if not exists // @@ -150,7 +161,7 @@ private void createContainerIfNotExists() throws Exception { // Create container with 400 RU/s containerIfNotExists.flatMap(containerResponse -> { container = containerResponse.getContainer(); - System.out.println("Checking container " + container.getId() + " completed!\n"); + logger.info("Checking container " + container.getId() + " completed!\n"); return Mono.empty(); }).block(); @@ -165,42 +176,44 @@ private void createFamilies(Flux families) throws Exception { // Combine multiple item inserts, associated success println's, and a final aggregate stats println into one Reactive stream. 
families.flatMap(family -> { - return container.createItem(family); - }) //Flux of item request responses - .flatMap(itemResponse -> { - System.out.println(String.format("Created item with request charge of %.2f within" + - " duration %s", - itemResponse.getRequestCharge(), itemResponse.getRequestLatency())); - System.out.println(String.format("Item ID: %s\n", itemResponse.getItem().getId())); - return Mono.just(itemResponse.getRequestCharge()); - }) //Flux of request charges - .reduce(0.0, - (charge_n,charge_nplus1) -> charge_n + charge_nplus1 - ) //Mono of total charge - there will be only one item in this stream - .subscribe(charge -> { - System.out.println(String.format("Created items with total request charge of %.2f\n", - charge)); - }, - err -> { - if (err instanceof CosmosClientException) { - //Client-specific errors - CosmosClientException cerr = (CosmosClientException)err; - cerr.printStackTrace(); - System.err.println(String.format("Read Item failed with %s\n", cerr)); - } else { - //General errors - err.printStackTrace(); - } - - completionLatch.countDown(); - }, - () -> {completionLatch.countDown();} - ); //Preserve the total charge and print aggregate charge/item count stats. 
+ return container.createItem(family); + }) //Flux of item request responses + .flatMap(itemResponse -> { + logger.info(String.format("Created item with request charge of %.2f within" + + " duration %s", + itemResponse.getRequestCharge(), itemResponse.getRequestLatency())); + logger.info(String.format("Item ID: %s\n", itemResponse.getItem().getId())); + return Mono.just(itemResponse.getRequestCharge()); + }) //Flux of request charges + .reduce(0.0, + (charge_n, charge_nplus1) -> charge_n + charge_nplus1 + ) //Mono of total charge - there will be only one item in this stream + .subscribe(charge -> { + logger.info(String.format("Created items with total request charge of %.2f\n", + charge)); + }, + err -> { + if (err instanceof CosmosClientException) { + //Client-specific errors + CosmosClientException cerr = (CosmosClientException) err; + cerr.printStackTrace(); + logger.error(String.format("Read Item failed with %s\n", cerr)); + } else { + //General errors + err.printStackTrace(); + } + + completionLatch.countDown(); + }, + () -> { + completionLatch.countDown(); + } + ); //Preserve the total charge and print aggregate charge/item count stats. 
try { completionLatch.await(); } catch (InterruptedException err) { - throw new AssertionError("Unexpected Interruption",err); + throw new AssertionError("Unexpected Interruption", err); } // @@ -208,7 +221,7 @@ private void createFamilies(Flux families) throws Exception { private void upsertFamily(Family family_to_upsert) { //Modify a field of the family object - System.out.println(String.format("Upserting the item with id %s after modifying the isRegistered field...",family_to_upsert.getId())); + logger.info(String.format("Upserting the item with id %s after modifying the isRegistered field...", family_to_upsert.getId())); family_to_upsert.setRegistered(!family_to_upsert.isRegistered()); //Upsert the modified item @@ -216,7 +229,7 @@ private void upsertFamily(Family family_to_upsert) { CosmosAsyncItemResponse item_resp = container.upsertItem(family_to_upsert).block(); // Get upsert request charge and other properties like latency, and diagnostics strings, etc. - System.out.println(String.format("Upserted item with request charge of %.2f within duration %s", + logger.info(String.format("Upserted item with request charge of %.2f within duration %s", item_resp.getRequestCharge(), item_resp.getRequestLatency())); return Mono.empty(); @@ -231,36 +244,38 @@ private void readItems(Flux familiesToCreate) { final CountDownLatch completionLatch = new CountDownLatch(1); familiesToCreate.flatMap(family -> { - Mono> asyncItemResponseMono = container.readItem(family.getId(), new PartitionKey(family.getLastName()), Family.class); - return asyncItemResponseMono; - }) - .subscribe( - itemResponse -> { - double requestCharge = itemResponse.getRequestCharge(); - Duration requestLatency = itemResponse.getRequestLatency(); - System.out.println(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", + Mono> asyncItemResponseMono = container.readItem(family.getId(), new PartitionKey(family.getLastName()), Family.class); + return 
asyncItemResponseMono; + }) + .subscribe( + itemResponse -> { + double requestCharge = itemResponse.getRequestCharge(); + Duration requestLatency = itemResponse.getRequestLatency(); + logger.info(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", itemResponse.getItem().getId(), requestCharge, requestLatency)); - }, - err -> { - if (err instanceof CosmosClientException) { - //Client-specific errors - CosmosClientException cerr = (CosmosClientException)err; - cerr.printStackTrace(); - System.err.println(String.format("Read Item failed with %s\n", cerr)); - } else { - //General errors - err.printStackTrace(); - } - - completionLatch.countDown(); - }, - () -> {completionLatch.countDown();} - ); + }, + err -> { + if (err instanceof CosmosClientException) { + //Client-specific errors + CosmosClientException cerr = (CosmosClientException) err; + cerr.printStackTrace(); + logger.error(String.format("Read Item failed with %s\n", cerr)); + } else { + //General errors + err.printStackTrace(); + } + + completionLatch.countDown(); + }, + () -> { + completionLatch.countDown(); + } + ); try { completionLatch.await(); } catch (InterruptedException err) { - throw new AssertionError("Unexpected Interruption",err); + throw new AssertionError("Unexpected Interruption", err); } // @@ -277,67 +292,69 @@ private void queryItems() { queryOptions.setPopulateQueryMetrics(true); CosmosPagedFlux pagedFluxResponse = container.queryItems( - "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); + "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); final CountDownLatch completionLatch = new CountDownLatch(1); pagedFluxResponse.byPage().subscribe( - fluxResponse -> { - System.out.println("Got a page of query result with " + - fluxResponse.getResults().size() + " items(s)" - + " and request charge of " + 
fluxResponse.getRequestCharge()); - - System.out.println("Item Ids " + fluxResponse - .getResults() - .stream() - .map(Family::getId) - .collect(Collectors.toList())); - }, - err -> { - if (err instanceof CosmosClientException) { - //Client-specific errors - CosmosClientException cerr = (CosmosClientException)err; - cerr.printStackTrace(); - System.err.println(String.format("Read Item failed with %s\n", cerr)); - } else { - //General errors - err.printStackTrace(); - } + fluxResponse -> { + logger.info("Got a page of query result with " + + fluxResponse.getResults().size() + " items(s)" + + " and request charge of " + fluxResponse.getRequestCharge()); + + logger.info("Item Ids " + fluxResponse + .getResults() + .stream() + .map(Family::getId) + .collect(Collectors.toList())); + }, + err -> { + if (err instanceof CosmosClientException) { + //Client-specific errors + CosmosClientException cerr = (CosmosClientException) err; + cerr.printStackTrace(); + logger.error(String.format("Read Item failed with %s\n", cerr)); + } else { + //General errors + err.printStackTrace(); + } - completionLatch.countDown(); - }, - () -> {completionLatch.countDown();} + completionLatch.countDown(); + }, + () -> { + completionLatch.countDown(); + } ); try { completionLatch.await(); } catch (InterruptedException err) { - throw new AssertionError("Unexpected Interruption",err); + throw new AssertionError("Unexpected Interruption", err); } // } private void deleteItem(Family item) { - container.deleteItem(item.getId(),new PartitionKey(item.getLastName())).block(); + container.deleteItem(item.getId(), new PartitionKey(item.getLastName())).block(); } private void shutdown() { try { //Clean shutdown - System.out.println("Deleting Cosmos DB resources"); - System.out.println("-Deleting container..."); + logger.info("Deleting Cosmos DB resources"); + logger.info("-Deleting container..."); if (container != null) container.delete().subscribe(); - System.out.println("-Deleting database..."); + 
logger.info("-Deleting database..."); if (database != null) database.delete().subscribe(); - System.out.println("-Closing the client..."); + logger.info("-Closing the client..."); } catch (Exception err) { - System.err.println("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); err.printStackTrace(); } client.close(); - System.out.println("Done."); + logger.info("Done."); } } diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java index 9d3d827..e508f6b 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java @@ -3,22 +3,32 @@ package com.azure.cosmos.examples.crudquickstart.sync; -import com.azure.cosmos.*; +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.Families; import com.azure.cosmos.examples.common.Family; -import com.azure.cosmos.models.*; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosItemRequestOptions; +import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.PartitionKey; import com.google.common.collect.Lists; +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; import java.time.Duration; import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public class SampleCRUDQuickstart { private CosmosClient client; @@ -45,14 +55,14 @@ public static void main(String[] args) { SampleCRUDQuickstart p = new SampleCRUDQuickstart(); try { - System.out.println("Starting SYNC main"); + logger.info("Starting SYNC main"); p.getStartedDemo(); - System.out.println("Demo complete, please hold while resources are released"); + logger.info("Demo complete, please hold while resources are released"); } catch (Exception e) { e.printStackTrace(); - System.err.println(String.format("Cosmos getStarted failed with %s", e)); + logger.error(String.format("Cosmos getStarted failed with %s", e)); } finally { - System.out.println("Closing the client"); + logger.info("Closing the client"); p.shutdown(); } } @@ -69,7 +79,7 @@ private void getStartedDemo() throws Exception { //5. Delete an item //6. Delete the Cosmos DB database and container resources and close the client. 
- System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); // Setting the preferred location to Cosmos DB Account region @@ -79,11 +89,11 @@ private void getStartedDemo() throws Exception { // Create sync client // client = new CosmosClientBuilder() - .setEndpoint(AccountSettings.HOST) - .setKey(AccountSettings.MASTER_KEY) - .setConnectionPolicy(defaultPolicy) - .setConsistencyLevel(ConsistencyLevel.EVENTUAL) - .buildClient(); + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(defaultPolicy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); // @@ -101,40 +111,40 @@ private void getStartedDemo() throws Exception { // Also applies an upsert operation to one of the items (create if not present, otherwise replace) createFamilies(familiesToCreate); - System.out.println("Reading items."); + logger.info("Reading items."); readItems(familiesToCreate); - System.out.println("Querying items."); + logger.info("Querying items."); queryItems(); - System.out.println("Delete an item."); + logger.info("Delete an item."); deleteItem(familiesToCreate.get(0)); } private void createDatabaseIfNotExists() throws Exception { - System.out.println("Create database " + databaseName + " if not exists."); + logger.info("Create database " + databaseName + " if not exists."); // Create database if not exists // database = client.createDatabaseIfNotExists(databaseName).getDatabase(); // - System.out.println("Checking database " + database.getId() + " completed!\n"); + logger.info("Checking database " + database.getId() + " completed!\n"); } private void createContainerIfNotExists() throws Exception { - System.out.println("Create container " + containerName + " if not exists."); + logger.info("Create container " + containerName + " if not exists."); // Create container 
if not exists // CosmosContainerProperties containerProperties = - new CosmosContainerProperties(containerName, "/lastName"); + new CosmosContainerProperties(containerName, "/lastName"); // Create container with 400 RU/s container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); // - System.out.println("Checking container " + container.getId() + " completed!\n"); + logger.info("Checking container " + container.getId() + " completed!\n"); } private void createFamilies(List families) throws Exception { @@ -151,22 +161,22 @@ private void createFamilies(List families) throws Exception { // // Get request charge and other properties like latency, and diagnostics strings, etc. - System.out.println(String.format("Created item with request charge of %.2f within duration %s", - item.getRequestCharge(), item.getRequestLatency())); + logger.info(String.format("Created item with request charge of %.2f within duration %s", + item.getRequestCharge(), item.getRequestLatency())); totalRequestCharge += item.getRequestCharge(); } - System.out.println(String.format("Created %d items with total request charge of %.2f", - families.size(), totalRequestCharge)); + logger.info(String.format("Created %d items with total request charge of %.2f", + families.size(), totalRequestCharge)); Family family_to_upsert = families.get(0); - System.out.println(String.format("Upserting the item with id %s after modifying the isRegistered field...",family_to_upsert.getId())); + logger.info(String.format("Upserting the item with id %s after modifying the isRegistered field...", family_to_upsert.getId())); family_to_upsert.setRegistered(!family_to_upsert.isRegistered()); CosmosItemResponse item = container.upsertItem(family_to_upsert); // Get upsert request charge and other properties like latency, and diagnostics strings, etc. 
- System.out.println(String.format("Upserted item with request charge of %.2f within duration %s", + logger.info(String.format("Upserted item with request charge of %.2f within duration %s", item.getRequestCharge(), item.getRequestLatency())); } @@ -179,11 +189,11 @@ private void readItems(ArrayList familiesToCreate) { CosmosItemResponse item = container.readItem(family.getId(), new PartitionKey(family.getLastName()), Family.class); double requestCharge = item.getRequestCharge(); Duration requestLatency = item.getRequestLatency(); - System.out.println(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", - item.getResource().getId(), requestCharge, requestLatency)); + logger.info(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", + item.getResource().getId(), requestCharge, requestLatency)); } catch (CosmosClientException e) { e.printStackTrace(); - System.out.println(String.format("Read Item failed with %s", e)); + logger.info(String.format("Read Item failed with %s", e)); } // }); @@ -202,39 +212,39 @@ private void queryItems() { "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); familiesPagedIterable.iterableByPage().forEach(cosmosItemPropertiesFeedResponse -> { - System.out.println("Got a page of query result with " + - cosmosItemPropertiesFeedResponse.getResults().size() + " items(s)" - + " and request charge of " + cosmosItemPropertiesFeedResponse.getRequestCharge()); - - System.out.println("Item Ids " + cosmosItemPropertiesFeedResponse - .getResults() - .stream() - .map(Family::getId) - .collect(Collectors.toList())); + logger.info("Got a page of query result with " + + cosmosItemPropertiesFeedResponse.getResults().size() + " items(s)" + + " and request charge of " + cosmosItemPropertiesFeedResponse.getRequestCharge()); + + logger.info("Item Ids " + cosmosItemPropertiesFeedResponse + .getResults() + .stream() 
+ .map(Family::getId) + .collect(Collectors.toList())); }); // } private void deleteItem(Family item) { - container.deleteItem(item.getId(),new PartitionKey(item.getLastName()),new CosmosItemRequestOptions()); + container.deleteItem(item.getId(), new PartitionKey(item.getLastName()), new CosmosItemRequestOptions()); } private void shutdown() { try { //Clean shutdown - System.out.println("Deleting Cosmos DB resources"); - System.out.println("-Deleting container..."); + logger.info("Deleting Cosmos DB resources"); + logger.info("-Deleting container..."); if (container != null) container.delete(); - System.out.println("-Deleting database..."); + logger.info("-Deleting database..."); if (database != null) database.delete(); - System.out.println("-Closing the client..."); + logger.info("-Closing the client..."); } catch (Exception err) { - System.err.println("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); err.printStackTrace(); } client.close(); - System.out.println("Done."); + logger.info("Done."); } } diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java index 7d5e5ea..4197819 100644 --- a/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java @@ -3,13 +3,31 @@ package com.azure.cosmos.examples.indexmanagement.async; -import com.azure.cosmos.*; +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosPagedFlux; import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.Families; import com.azure.cosmos.examples.common.Family; -import com.azure.cosmos.models.*; +import com.azure.cosmos.models.CosmosAsyncContainerResponse; +import com.azure.cosmos.models.CosmosAsyncDatabaseResponse; +import com.azure.cosmos.models.CosmosAsyncItemResponse; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.ExcludedPath; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.IncludedPath; +import com.azure.cosmos.models.IndexingMode; +import com.azure.cosmos.models.IndexingPolicy; +import com.azure.cosmos.models.PartitionKey; import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -19,9 +37,6 @@ import 
java.util.concurrent.CountDownLatch; import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public class SampleIndexManagementAsync { private CosmosAsyncClient client; @@ -48,14 +63,14 @@ public static void main(String[] args) { SampleIndexManagementAsync p = new SampleIndexManagementAsync(); try { - System.out.println("Starting ASYNC main"); + logger.info("Starting ASYNC main"); p.indexManagementDemo(); - System.out.println("Demo complete, please hold while resources are released"); + logger.info("Demo complete, please hold while resources are released"); } catch (Exception e) { e.printStackTrace(); - System.err.println(String.format("Cosmos getStarted failed with %s", e)); + logger.error(String.format("Cosmos getStarted failed with %s", e)); } finally { - System.out.println("Closing the client"); + logger.info("Closing the client"); p.shutdown(); } } @@ -67,7 +82,7 @@ private void indexManagementDemo() throws Exception { //Look at the implementation of createContainerIfNotExistsWithSpecifiedIndex() for the demonstration of //indexing capabilities. 
- System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); // Setting the preferred location to Cosmos DB Account region @@ -90,10 +105,10 @@ private void indexManagementDemo() throws Exception { //Here is where index management is performed createContainerIfNotExistsWithSpecifiedIndex(); - Family andersenFamilyItem=Families.getAndersenFamilyItem(); - Family wakefieldFamilyItem=Families.getWakefieldFamilyItem(); - Family johnsonFamilyItem=Families.getJohnsonFamilyItem(); - Family smithFamilyItem=Families.getSmithFamilyItem(); + Family andersenFamilyItem = Families.getAndersenFamilyItem(); + Family wakefieldFamilyItem = Families.getWakefieldFamilyItem(); + Family johnsonFamilyItem = Families.getJohnsonFamilyItem(); + Family smithFamilyItem = Families.getSmithFamilyItem(); // Setup family items to create Flux familiesToCreate = Flux.just(andersenFamilyItem, @@ -108,29 +123,29 @@ private void indexManagementDemo() throws Exception { johnsonFamilyItem, smithFamilyItem); - System.out.println("Reading items."); + logger.info("Reading items."); readItems(familiesToCreate); - System.out.println("Querying items."); + logger.info("Querying items."); queryItems(); } private void createDatabaseIfNotExists() throws Exception { - System.out.println("Create database " + databaseName + " if not exists."); + logger.info("Create database " + databaseName + " if not exists."); // Create database if not exists // Mono databaseIfNotExists = client.createDatabaseIfNotExists(databaseName); databaseIfNotExists.flatMap(databaseResponse -> { database = databaseResponse.getDatabase(); - System.out.println("Checking database " + database.getId() + " completed!\n"); + logger.info("Checking database " + database.getId() + " completed!\n"); return Mono.empty(); }).block(); // } private void 
createContainerIfNotExistsWithSpecifiedIndex() throws Exception { - System.out.println("Create container " + containerName + " if not exists."); + logger.info("Create container " + containerName + " if not exists."); // Create container if not exists // @@ -198,7 +213,7 @@ private void createContainerIfNotExistsWithSpecifiedIndex() throws Exception { // Create container with 400 RU/s containerIfNotExists.flatMap(containerResponse -> { container = containerResponse.getContainer(); - System.out.println("Checking container " + container.getId() + " completed!\n"); + logger.info("Checking container " + container.getId() + " completed!\n"); return Mono.empty(); }).block(); @@ -216,25 +231,25 @@ private void createFamilies(Flux families) throws Exception { return container.createItem(family); }) //Flux of item request responses .flatMap(itemResponse -> { - System.out.println(String.format("Created item with request charge of %.2f within" + + logger.info(String.format("Created item with request charge of %.2f within" + " duration %s", itemResponse.getRequestCharge(), itemResponse.getRequestLatency())); - System.out.println(String.format("Item ID: %s\n", itemResponse.getItem().getId())); + logger.info(String.format("Item ID: %s\n", itemResponse.getItem().getId())); return Mono.just(itemResponse.getRequestCharge()); }) //Flux of request charges .reduce(0.0, - (charge_n,charge_nplus1) -> charge_n + charge_nplus1 + (charge_n, charge_nplus1) -> charge_n + charge_nplus1 ) //Mono of total charge - there will be only one item in this stream .subscribe(charge -> { - System.out.println(String.format("Created items with total request charge of %.2f\n", + logger.info(String.format("Created items with total request charge of %.2f\n", charge)); }, err -> { if (err instanceof CosmosClientException) { //Client-specific errors - CosmosClientException cerr = (CosmosClientException)err; + CosmosClientException cerr = (CosmosClientException) err; cerr.printStackTrace(); - 
System.out.println(String.format("Read Item failed with %s\n", cerr)); + logger.info(String.format("Read Item failed with %s\n", cerr)); } else { //General errors err.printStackTrace(); @@ -242,13 +257,15 @@ private void createFamilies(Flux families) throws Exception { completionLatch.countDown(); }, - () -> {completionLatch.countDown();} + () -> { + completionLatch.countDown(); + } ); //Preserve the total charge and print aggregate charge/item count stats. try { completionLatch.await(); } catch (InterruptedException err) { - throw new AssertionError("Unexpected Interruption",err); + throw new AssertionError("Unexpected Interruption", err); } // @@ -269,15 +286,15 @@ private void readItems(Flux familiesToCreate) { itemResponse -> { double requestCharge = itemResponse.getRequestCharge(); Duration requestLatency = itemResponse.getRequestLatency(); - System.out.println(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", + logger.info(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", itemResponse.getItem().getId(), requestCharge, requestLatency)); }, err -> { if (err instanceof CosmosClientException) { //Client-specific errors - CosmosClientException cerr = (CosmosClientException)err; + CosmosClientException cerr = (CosmosClientException) err; cerr.printStackTrace(); - System.out.println(String.format("Read Item failed with %s\n", cerr)); + logger.info(String.format("Read Item failed with %s\n", cerr)); } else { //General errors err.printStackTrace(); @@ -285,13 +302,15 @@ private void readItems(Flux familiesToCreate) { completionLatch.countDown(); }, - () -> {completionLatch.countDown();} + () -> { + completionLatch.countDown(); + } ); try { completionLatch.await(); } catch (InterruptedException err) { - throw new AssertionError("Unexpected Interruption",err); + throw new AssertionError("Unexpected Interruption", err); } // @@ -313,11 +332,11 @@ private void queryItems() { 
pagedFluxResponse.byPage().subscribe( fluxResponse -> { - System.out.println("Got a page of query result with " + + logger.info("Got a page of query result with " + fluxResponse.getResults().size() + " items(s)" + " and request charge of " + fluxResponse.getRequestCharge()); - System.out.println("Item Ids " + fluxResponse + logger.info("Item Ids " + fluxResponse .getResults() .stream() .map(Family::getId) @@ -326,9 +345,9 @@ private void queryItems() { err -> { if (err instanceof CosmosClientException) { //Client-specific errors - CosmosClientException cerr = (CosmosClientException)err; + CosmosClientException cerr = (CosmosClientException) err; cerr.printStackTrace(); - System.err.println(String.format("Read Item failed with %s\n", cerr)); + logger.error(String.format("Read Item failed with %s\n", cerr)); } else { //General errors err.printStackTrace(); @@ -336,13 +355,15 @@ private void queryItems() { completionLatch.countDown(); }, - () -> {completionLatch.countDown();} + () -> { + completionLatch.countDown(); + } ); try { completionLatch.await(); } catch (InterruptedException err) { - throw new AssertionError("Unexpected Interruption",err); + throw new AssertionError("Unexpected Interruption", err); } // @@ -351,19 +372,19 @@ private void queryItems() { private void shutdown() { try { //Clean shutdown - System.out.println("Deleting Cosmos DB resources"); - System.out.println("-Deleting container..."); + logger.info("Deleting Cosmos DB resources"); + logger.info("-Deleting container..."); if (container != null) container.delete().subscribe(); - System.out.println("-Deleting database..."); + logger.info("-Deleting database..."); if (database != null) database.delete().subscribe(); - System.out.println("-Closing the client..."); + logger.info("-Closing the client..."); } catch (Exception err) { - System.err.println("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); err.printStackTrace(); } client.close(); - System.out.println("Done."); + logger.info("Done."); } } diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java index 1b459df..4fb1965 100644 --- a/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java @@ -3,19 +3,33 @@ package com.azure.cosmos.examples.indexmanagement.sync; -import com.azure.cosmos.*; +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.Families; import com.azure.cosmos.examples.common.Family; -import com.azure.cosmos.models.*; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosItemRequestOptions; +import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.ExcludedPath; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.IncludedPath; +import com.azure.cosmos.models.IndexingMode; +import com.azure.cosmos.models.IndexingPolicy; +import com.azure.cosmos.models.PartitionKey; import com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.time.Duration; import java.util.ArrayList; -import java.util.Collection; import 
java.util.List; import java.util.stream.Collectors; @@ -46,14 +60,14 @@ public static void main(String[] args) { SampleIndexManagement p = new SampleIndexManagement(); try { - System.out.println("Starting SYNC main"); + logger.info("Starting SYNC main"); p.indexManagementDemo(); - System.out.println("Demo complete, please hold while resources are released"); + logger.info("Demo complete, please hold while resources are released"); } catch (Exception e) { e.printStackTrace(); - System.err.println(String.format("Cosmos getStarted failed with %s", e)); + logger.error(String.format("Cosmos getStarted failed with %s", e)); } finally { - System.out.println("Closing the client"); + logger.info("Closing the client"); p.shutdown(); } } @@ -65,7 +79,7 @@ private void indexManagementDemo() throws Exception { //Look at the implementation of createContainerIfNotExistsWithSpecifiedIndex() for the demonstration of //indexing capabilities. - System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); // Setting the preferred location to Cosmos DB Account region @@ -97,26 +111,26 @@ private void indexManagementDemo() throws Exception { createFamilies(familiesToCreate); - System.out.println("Reading items."); + logger.info("Reading items."); readItems(familiesToCreate); - System.out.println("Querying items."); + logger.info("Querying items."); queryItems(); } private void createDatabaseIfNotExists() throws Exception { - System.out.println("Create database " + databaseName + " if not exists."); + logger.info("Create database " + databaseName + " if not exists."); // Create database if not exists // database = client.createDatabaseIfNotExists(databaseName).getDatabase(); // - System.out.println("Checking database " + database.getId() + " completed!\n"); + logger.info("Checking database " + database.getId() + " 
completed!\n"); } private void createContainerIfNotExistsWithSpecifiedIndex() throws Exception { - System.out.println("Create container " + containerName + " if not exists."); + logger.info("Create container " + containerName + " if not exists."); // Create container if not exists CosmosContainerProperties containerProperties = @@ -181,7 +195,7 @@ private void createContainerIfNotExistsWithSpecifiedIndex() throws Exception { // Create container with 400 RU/s container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); - System.out.println("Checking container " + container.getId() + " completed!\n"); + logger.info("Checking container " + container.getId() + " completed!\n"); } private void createFamilies(List families) throws Exception { @@ -198,12 +212,12 @@ private void createFamilies(List families) throws Exception { // // Get request charge and other properties like latency, and diagnostics strings, etc. - System.out.println(String.format("Created item with request charge of %.2f within" + + logger.info(String.format("Created item with request charge of %.2f within" + " duration %s", item.getRequestCharge(), item.getRequestLatency())); totalRequestCharge += item.getRequestCharge(); } - System.out.println(String.format("Created %d items with total request " + + logger.info(String.format("Created %d items with total request " + "charge of %.2f", families.size(), totalRequestCharge)); @@ -218,11 +232,11 @@ private void readItems(ArrayList familiesToCreate) { CosmosItemResponse item = container.readItem(family.getId(), new PartitionKey(family.getLastName()), Family.class); double requestCharge = item.getRequestCharge(); Duration requestLatency = item.getRequestLatency(); - System.out.println(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", + logger.info(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", item.getResource().getId(), requestCharge, 
requestLatency)); } catch (CosmosClientException e) { e.printStackTrace(); - System.err.println(String.format("Read Item failed with %s", e)); + logger.error(String.format("Read Item failed with %s", e)); } // }); @@ -240,11 +254,11 @@ private void queryItems() { "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); familiesPagedIterable.iterableByPage().forEach(cosmosItemPropertiesFeedResponse -> { - System.out.println("Got a page of query result with " + + logger.info("Got a page of query result with " + cosmosItemPropertiesFeedResponse.getResults().size() + " items(s)" + " and request charge of " + cosmosItemPropertiesFeedResponse.getRequestCharge()); - System.out.println("Item Ids " + cosmosItemPropertiesFeedResponse + logger.info("Item Ids " + cosmosItemPropertiesFeedResponse .getResults() .stream() .map(Family::getId) @@ -256,19 +270,19 @@ private void queryItems() { private void shutdown() { try { //Clean shutdown - System.out.println("Deleting Cosmos DB resources"); - System.out.println("-Deleting container..."); + logger.info("Deleting Cosmos DB resources"); + logger.info("-Deleting container..."); if (container != null) container.delete(); - System.out.println("-Deleting database..."); + logger.info("-Deleting database..."); if (database != null) database.delete(); - System.out.println("-Closing the client..."); + logger.info("-Closing the client..."); } catch (Exception err) { - System.err.println("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); err.printStackTrace(); } client.close(); - System.out.println("Done."); + logger.info("Done."); } } diff --git a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java index eaf23bb..8c8cf33 100644 --- a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java @@ -2,23 +2,33 @@ // Licensed under the MIT License. - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. package com.azure.cosmos.examples.storedprocedure.async; -import com.azure.cosmos.*; +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosPagedFlux; import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.CustomPOJO; -import com.azure.cosmos.models.*; +import com.azure.cosmos.models.CosmosAsyncItemResponse; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosStoredProcedureProperties; +import com.azure.cosmos.models.CosmosStoredProcedureRequestOptions; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.PartitionKey; import com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import reactor.core.publisher.Mono; -import java.util.UUID; import java.util.concurrent.CountDownLatch; public class SampleStoredProcedureAsync { @@ -50,12 +60,12 @@ public static void 
main(String[] args) { try { p.sprocDemo(); - System.out.println("Demo complete, please hold while resources are released"); + logger.info("Demo complete, please hold while resources are released"); p.shutdown(); - System.out.println("Done.\n"); + logger.info("Done.\n"); } catch (Exception e) { e.printStackTrace(); - System.out.println(String.format("Cosmos getStarted failed with %s", e)); + logger.info(String.format("Cosmos getStarted failed with %s", e)); p.close(); } finally { } @@ -76,15 +86,15 @@ private void sprocDemo() throws Exception { executeStoredProcedure(); //Perform a point-read to confirm that the item with id test_doc exists - System.out.println("Checking that a document was created by the stored procedure..."); + logger.info("Checking that a document was created by the stored procedure..."); CosmosAsyncItemResponse test_resp = - container.readItem("test_doc",new PartitionKey("test_doc"),CustomPOJO.class).block(); - System.out.println(String.format( - "Status return value of point-read for document created by stored procedure (200 indicates success): %d",test_resp.getStatusCode())); + container.readItem("test_doc", new PartitionKey("test_doc"), CustomPOJO.class).block(); + logger.info(String.format( + "Status return value of point-read for document created by stored procedure (200 indicates success): %d", test_resp.getStatusCode())); } - public void setUp() throws Exception{ - System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + public void setUp() throws Exception { + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); // Setting the preferred location to Cosmos DB Account region @@ -94,25 +104,25 @@ public void setUp() throws Exception{ // Create sync client // client = new CosmosClientBuilder() - .setEndpoint(AccountSettings.HOST) - .setKey(AccountSettings.MASTER_KEY) - .setConnectionPolicy(defaultPolicy) - 
.setConsistencyLevel(ConsistencyLevel.EVENTUAL) - .buildAsyncClient(); + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(defaultPolicy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildAsyncClient(); - System.out.println("Create database " + databaseName + " with container " + containerName + " if either does not already exist.\n"); + logger.info("Create database " + databaseName + " with container " + containerName + " if either does not already exist.\n"); - client.createDatabaseIfNotExists(databaseName).flatMap(databaseResponse -> { - database = databaseResponse.getDatabase(); - return Mono.empty(); - }).block(); + client.createDatabaseIfNotExists(databaseName).flatMap(databaseResponse -> { + database = databaseResponse.getDatabase(); + return Mono.empty(); + }).block(); - CosmosContainerProperties containerProperties = + CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerName, "/id"); - database.createContainerIfNotExists(containerProperties, 400).flatMap(containerResponse -> { - container = containerResponse.getContainer(); - return Mono.empty(); - }).block(); + database.createContainerIfNotExists(containerProperties, 400).flatMap(containerResponse -> { + container = containerResponse.getContainer(); + return Mono.empty(); + }).block(); } public void shutdown() throws Exception { @@ -121,24 +131,24 @@ public void shutdown() throws Exception { } public void createStoredProcedure() throws Exception { - System.out.println("Creating stored procedure...\n"); + logger.info("Creating stored procedure...\n"); sprocId = "createMyDocument"; String sprocBody = "function createMyDocument() {\n" + - "var documentToCreate = {\"id\":\"test_doc\"}\n" + - "var context = getContext();\n" + - "var collection = context.getCollection();\n" + - "var accepted = collection.createDocument(collection.getSelfLink(), documentToCreate,\n" + - " function (err, documentCreated) {\n" + - "if (err) 
throw new Error('Error' + err.message);\n" + - "context.getResponse().setBody(documentCreated.id)\n" + - "});\n" + - "if (!accepted) return;\n" + - "}"; - CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId,sprocBody); + "var documentToCreate = {\"id\":\"test_doc\"}\n" + + "var context = getContext();\n" + + "var collection = context.getCollection();\n" + + "var accepted = collection.createDocument(collection.getSelfLink(), documentToCreate,\n" + + " function (err, documentCreated) {\n" + + "if (err) throw new Error('Error' + err.message);\n" + + "context.getResponse().setBody(documentCreated.id)\n" + + "});\n" + + "if (!accepted) return;\n" + + "}"; + CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId, sprocBody); container.getScripts() - .createStoredProcedure(storedProcedureDef, - new CosmosStoredProcedureRequestOptions()).block(); + .createStoredProcedure(storedProcedureDef, + new CosmosStoredProcedureRequestOptions()).block(); } private void readAllSprocs() throws Exception { @@ -150,58 +160,60 @@ private void readAllSprocs() throws Exception { final CountDownLatch completionLatch = new CountDownLatch(1); - fluxResponse.flatMap(storedProcedureProperties -> { - System.out.println(String.format("Stored Procedure: %s\n",storedProcedureProperties.getId())); + logger.info(String.format("Stored Procedure: %s\n", storedProcedureProperties.getId())); return Mono.empty(); }).subscribe( - s -> {}, - err -> { - if (err instanceof CosmosClientException) { - //Client-specific errors - CosmosClientException cerr = (CosmosClientException)err; - cerr.printStackTrace(); - System.out.println(String.format("Read Item failed with %s\n", cerr)); - } else { - //General errors - err.printStackTrace(); + s -> { + }, + err -> { + if (err instanceof CosmosClientException) { + //Client-specific errors + CosmosClientException cerr = (CosmosClientException) err; + cerr.printStackTrace(); + 
logger.info(String.format("Read Item failed with %s\n", cerr)); + } else { + //General errors + err.printStackTrace(); + } + + completionLatch.countDown(); + }, + () -> { + completionLatch.countDown(); } - - completionLatch.countDown(); - }, - () -> {completionLatch.countDown();} ); completionLatch.await(); } public void executeStoredProcedure() throws Exception { - System.out.println(String.format("Executing stored procedure %s...\n\n",sprocId)); + logger.info(String.format("Executing stored procedure %s...\n\n", sprocId)); CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions(); options.setPartitionKey(new PartitionKey("test_doc")); container.getScripts() - .getStoredProcedure(sprocId) - .execute(null, options) - .flatMap(executeResponse -> { - System.out.println(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", - sprocId, - executeResponse.getResponseAsString(), - executeResponse.getStatusCode(), - executeResponse.getRequestCharge())); - return Mono.empty(); - }).block(); + .getStoredProcedure(sprocId) + .execute(null, options) + .flatMap(executeResponse -> { + logger.info(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", + sprocId, + executeResponse.getResponseAsString(), + executeResponse.getStatusCode(), + executeResponse.getRequestCharge())); + return Mono.empty(); + }).block(); } public void deleteStoredProcedure() throws Exception { - System.out.println("-Deleting stored procedure...\n"); + logger.info("-Deleting stored procedure...\n"); container.getScripts() - .getStoredProcedure(sprocId) - .delete().block(); - System.out.println("-Deleting database...\n"); + .getStoredProcedure(sprocId) + .delete().block(); + logger.info("-Deleting database...\n"); database.delete().block(); - System.out.println("-Closing client instance...\n"); + logger.info("-Closing client instance...\n"); client.close(); } } diff --git 
a/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java index 4524507..5716608 100644 --- a/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java @@ -3,11 +3,23 @@ package com.azure.cosmos.examples.storedprocedure.sync; -import com.azure.cosmos.*; +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.CustomPOJO; -import com.azure.cosmos.models.*; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.CosmosStoredProcedureProperties; +import com.azure.cosmos.models.CosmosStoredProcedureRequestOptions; +import com.azure.cosmos.models.CosmosStoredProcedureResponse; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.PartitionKey; import com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,12 +55,12 @@ public static void main(String[] args) { try { p.sprocDemo(); - System.out.println("Demo complete, please hold while resources are released"); + logger.info("Demo complete, please hold while resources are released"); p.shutdown(); - System.out.println("Done.\n"); + logger.info("Done.\n"); } catch (Exception e) { e.printStackTrace(); - System.err.println(String.format("Cosmos getStarted failed with %s", e)); + logger.error(String.format("Cosmos getStarted failed 
with %s", e)); p.close(); } finally { } @@ -57,26 +69,26 @@ public static void main(String[] args) { // private void sprocDemo() throws Exception { - //Setup client, DB, and the container for which we will create stored procedures - //The container partition key will be id - setUp(); - - //Create stored procedure and list all stored procedures that have been created. - createStoredProcedure(); - readAllSprocs(); - - //Execute the stored procedure, which we expect will create an item with id test_doc - executeStoredProcedure(); - - //Perform a point-read to confirm that the item with id test_doc exists - System.out.println("Checking that a document was created by the stored procedure..."); - CosmosItemResponse test_resp = container.readItem("test_doc",new PartitionKey("test_doc"),CustomPOJO.class); - System.out.println(String.format( - "Result of point-read for document created by stored procedure (200 indicates success): %d",test_resp.getStatusCode())); + //Setup client, DB, and the container for which we will create stored procedures + //The container partition key will be id + setUp(); + + //Create stored procedure and list all stored procedures that have been created. 
+ createStoredProcedure(); + readAllSprocs(); + + //Execute the stored procedure, which we expect will create an item with id test_doc + executeStoredProcedure(); + + //Perform a point-read to confirm that the item with id test_doc exists + logger.info("Checking that a document was created by the stored procedure..."); + CosmosItemResponse test_resp = container.readItem("test_doc", new PartitionKey("test_doc"), CustomPOJO.class); + logger.info(String.format( + "Result of point-read for document created by stored procedure (200 indicates success): %d", test_resp.getStatusCode())); } - public void setUp() throws Exception{ - System.out.println("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + public void setUp() throws Exception { + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); // Setting the preferred location to Cosmos DB Account region @@ -86,19 +98,19 @@ public void setUp() throws Exception{ // Create sync client // client = new CosmosClientBuilder() - .setEndpoint(AccountSettings.HOST) - .setKey(AccountSettings.MASTER_KEY) - .setConnectionPolicy(defaultPolicy) - .setConsistencyLevel(ConsistencyLevel.EVENTUAL) - .buildClient(); + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(defaultPolicy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); - System.out.println("Create database " + databaseName + " with container " + containerName + " if either does not already exist.\n"); + logger.info("Create database " + databaseName + " with container " + containerName + " if either does not already exist.\n"); - database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); - CosmosContainerProperties containerProperties = + CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerName, 
"/id"); - container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); + container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); } public void shutdown() throws Exception { @@ -107,28 +119,28 @@ public void shutdown() throws Exception { } public void createStoredProcedure() throws Exception { - System.out.println("Creating stored procedure..."); + logger.info("Creating stored procedure..."); sprocId = "createMyDocument"; String sprocBody = "function createMyDocument() {\n" + - "var documentToCreate = {\"id\":\"test_doc\"}\n" + - "var context = getContext();\n" + - "var collection = context.getCollection();\n" + - "var accepted = collection.createDocument(collection.getSelfLink(), documentToCreate,\n" + - " function (err, documentCreated) {\n" + - "if (err) throw new Error('Error' + err.message);\n" + - "context.getResponse().setBody(documentCreated.id)\n" + - "});\n" + - "if (!accepted) return;\n" + - "}"; - CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId,sprocBody); + "var documentToCreate = {\"id\":\"test_doc\"}\n" + + "var context = getContext();\n" + + "var collection = context.getCollection();\n" + + "var accepted = collection.createDocument(collection.getSelfLink(), documentToCreate,\n" + + " function (err, documentCreated) {\n" + + "if (err) throw new Error('Error' + err.message);\n" + + "context.getResponse().setBody(documentCreated.id)\n" + + "});\n" + + "if (!accepted) return;\n" + + "}"; + CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId, sprocBody); container.getScripts() - .createStoredProcedure(storedProcedureDef, - new CosmosStoredProcedureRequestOptions()); + .createStoredProcedure(storedProcedureDef, + new CosmosStoredProcedureRequestOptions()); } private void readAllSprocs() throws Exception { - System.out.println("Listing all stored procedures associated with container " + containerName + 
"\n"); + logger.info("Listing all stored procedures associated with container " + containerName + "\n"); FeedOptions feedOptions = new FeedOptions(); CosmosPagedIterable feedResponseIterable = @@ -136,37 +148,37 @@ private void readAllSprocs() throws Exception { Iterator feedResponseIterator = feedResponseIterable.iterator(); - while(feedResponseIterator.hasNext()) { + while (feedResponseIterator.hasNext()) { CosmosStoredProcedureProperties storedProcedureProperties = feedResponseIterator.next(); - System.out.println(String.format("Stored Procedure: %s",storedProcedureProperties)); + logger.info(String.format("Stored Procedure: %s", storedProcedureProperties)); } } public void executeStoredProcedure() throws Exception { - System.out.println(String.format("Executing stored procedure %s...\n\n",sprocId)); + logger.info(String.format("Executing stored procedure %s...\n\n", sprocId)); CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions(); options.setPartitionKey(new PartitionKey("test_doc")); CosmosStoredProcedureResponse executeResponse = container.getScripts() - .getStoredProcedure(sprocId) - .execute(null, options); - - System.out.println(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", - sprocId, - executeResponse.responseAsString(), - executeResponse.getStatusCode(), - executeResponse.getRequestCharge())); + .getStoredProcedure(sprocId) + .execute(null, options); + + logger.info(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", + sprocId, + executeResponse.responseAsString(), + executeResponse.getStatusCode(), + executeResponse.getRequestCharge())); } public void deleteStoredProcedure() throws Exception { - System.out.println("-Deleting stored procedure...\n"); + logger.info("-Deleting stored procedure...\n"); container.getScripts() - .getStoredProcedure(sprocId) - .delete(); - System.out.println("-Deleting database...\n"); + .getStoredProcedure(sprocId) + .delete(); + 
logger.info("-Deleting database...\n"); database.delete(); - System.out.println("-Closing client instance...\n"); + logger.info("-Closing client instance...\n"); client.close(); - System.out.println("Done."); + logger.info("Done."); } } diff --git a/src/main/resources/log4j2.properties b/src/main/resources/log4j2.properties index 43f940f..f1178ab 100644 --- a/src/main/resources/log4j2.properties +++ b/src/main/resources/log4j2.properties @@ -1,21 +1,14 @@ # this is the log4j configuration for tests - # Set root logger level to WARN and its appender to STDOUT. -rootLogger.level = INFO -rootLogger.appenderRef.stdout.ref = STDOUT - -logger.netty.name = io.netty -logger.netty.level = INFO - -logger.reactor.name = io.reactivex -logger.reactor.level = INFO - -logger.cosmos.name = com.azure.cosmos -logger.cosmos.level = INFO - +rootLogger.level=INFO +rootLogger.appenderRef.stdout.ref=STDOUT +logger.netty.name=io.netty +logger.netty.level=INFO +logger.cosmos.name=com.azure.cosmos +logger.cosmos.level=INFO # STDOUT is a ConsoleAppender and uses PatternLayout. -appender.console.name = STDOUT -appender.console.type = Console -appender.console.layout.type = PatternLayout -appender.console.layout.pattern = %m%n +appender.console.name=STDOUT +appender.console.type=Console +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=%m%n From fec3a956cf7bfa3f5d951332724c475e989dfabb Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 10 Mar 2020 13:45:36 -0700 Subject: [PATCH 040/110] Updated README.md to be IDE-agnostic --- README.md | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index b110ea0..abbd187 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,6 @@ Sample code repo for Azure Cosmos DB Java SDK for SQL API. By cloning and runnin ## Prerequisites -* A Java IDE such as IntelliJ IDEA or VSCode * Maven * Java SE JRE 8 * Setting up an Azure Cosmos DB account through the Azure Portal. 
The **Create a database account** section of [this guide](https://docs.microsoft.com/en-us/azure/cosmos-db/create-sql-api-java) walks you through account creation. @@ -46,17 +45,15 @@ Clone the sample to your PC. Using your Java IDE, open pom.xml as a Maven projec ## Running the sample -*If you are using Intellij IDEA as your Java IDE:* Once you have opened the project, go to the **Run/Debug Configurations** drop-down and choose **Edit Configurations**. -In the **Edit Configurations** dialog, click the **+** (**Add New Configuration**) button, select **Application** as the configuration type, - and give the new configuration a name. Once you are looking at the settings for your new Configuration, find **Environment variables** and paste +These environment variables must be set ``` ACCOUNT_HOST=your account hostname;ACCOUNT_KEY=your account master key ``` -which gives the sample read/write access to your account. +in order to give the samples read/write access to your account. -To choose which sample will run, populate the **Main class** field of the Configuration with +To run a sample, specify its Main Class ``` com.azure.cosmos.examples.sample.synchronicity.MainClass From b062c616b804e13880014e7ed67ebb222a2668e1 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 10 Mar 2020 13:52:52 -0700 Subject: [PATCH 041/110] Improved explanatory comments in all samples --- .../changefeed/SampleChangeFeedProcessor.java | 14 +++++++------- .../async/SampleCRUDQuickstartAsync.java | 17 ++++++++--------- .../sync/SampleCRUDQuickstart.java | 17 ++++++++--------- .../async/SampleIndexManagementAsync.java | 7 +++---- .../sync/SampleIndexManagement.java | 7 +++---- .../async/SampleStoredProcedureAsync.java | 8 ++++++-- .../sync/SampleStoredProcedure.java | 8 ++++++-- 7 files changed, 41 insertions(+), 37 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java 
b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java index 5a28566..48000b6 100644 --- a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java @@ -28,7 +28,13 @@ /** * Sample for Change Feed Processor. - */ + * This sample models an application where documents are being inserted into one container (the "feed container"), + * and meanwhile another worker thread or worker application is pulling inserted documents from the feed container's Change Feed + * and operating on them in some way. For one or more workers to process the Change Feed of a container, the workers must first contact the server + * and "lease" access to monitor one or more partitions of the feed container. The Change Feed Processor Library + * handles leasing automatically for you, however you must create a separate "lease container" where the Change Feed + * Processor Library can store and track leases container partitions. +*/ public class SampleChangeFeedProcessor { public static int WAIT_FOR_WORK = 60000; @@ -45,12 +51,6 @@ public static void main(String[] args) { logger.info("BEGIN Sample"); try { - //This sample models an application where documents are being inserted into one container (the "feed container"), - //and meanwhile another worker thread or worker application is pulling inserted documents from the feed container's Change Feed - //and operating on them in some way. For one or more workers to process the Change Feed of a container, the workers must first contact the server - //and "lease" access to monitor one or more partitions of the feed container. The Change Feed Processor Library - //handles leasing automatically for you, however you must create a separate "lease container" where the Change Feed - //Processor Library can store and track leases container partitions. 
//Summary of the next four commands: //-Create an asynchronous Azure Cosmos DB client and database so that we can issue async requests to the DB diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java index 02c9c1e..5e67558 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java @@ -51,7 +51,14 @@ public void close() { /** * Run a Hello CosmosDB console application. * - * @param args command line args. + * This is a simple sample application intended to demonstrate Create, Read, Update, Delete (CRUD) operations + * with Azure Cosmos DB Java SDK, as applied to databases, containers and items. This sample will + * 1. Create asynchronous client, database and container instances + * 2. Create several items + * 3. Upsert one of the items + * 4. Perform a query over the items + * 5. Delete an item + * 6. Delete the Cosmos DB database and container resources and close the client. */ //

public static void main(String[] args) { @@ -73,14 +80,6 @@ public static void main(String[] args) { //
private void getStartedDemo() throws Exception { - //This is a simple sample application intended to demonstrate Create, Read, Update, Delete (CRUD) operations - //with Azure Cosmos DB Java SDK, as applied to databases, containers and items. This sample will - //1. Create asynchronous client, database and container instances - //2. Create several items - //3. Upsert one of the items - //4. Perform a query over the items - //5. Delete an item - //6. Delete the Cosmos DB database and container resources and close the client. logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java index e508f6b..cd029ec 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java @@ -48,7 +48,14 @@ public void close() { /** * Run a Hello CosmosDB console application. * - * @param args command line args. + * This is a simple sample application intended to demonstrate Create, Read, Update, Delete (CRUD) operations + * with Azure Cosmos DB Java SDK, as applied to databases, containers and items. This sample will + * 1. Create synchronous client, database and container instances + * 2. Create several items + * 3. Upsert one of the items + * 4. Perform a query over the items + * 5. Delete an item + * 6. Delete the Cosmos DB database and container resources and close the client. * */ //
public static void main(String[] args) { @@ -70,14 +77,6 @@ public static void main(String[] args) { //
private void getStartedDemo() throws Exception { - //This is a simple sample application intended to demonstrate Create, Read, Update, Delete (CRUD) operations - //with Azure Cosmos DB Java SDK, as applied to databases, containers and items. This sample will - //1. Create synchronous client, database and container instances - //2. Create several items - //3. Upsert one of the items - //4. Perform a query over the items - //5. Delete an item - //6. Delete the Cosmos DB database and container resources and close the client. logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java index 4197819..9eeac8c 100644 --- a/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java @@ -56,7 +56,9 @@ public void close() { /** * Run a Hello CosmosDB console application. * - * @param args command line args. + * This sample is similar to SampleCRUDQuickstartAsync, but modified to show indexing capabilities of Cosmos DB. + * Look at the implementation of createContainerIfNotExistsWithSpecifiedIndex() for the demonstration of + * indexing capabilities. */ //
public static void main(String[] args) { @@ -78,9 +80,6 @@ public static void main(String[] args) { //
private void indexManagementDemo() throws Exception { - //This sample is similar to SampleCRUDQuickstartAsync, but modified to show indexing capabilities of Cosmos DB. - //Look at the implementation of createContainerIfNotExistsWithSpecifiedIndex() for the demonstration of - //indexing capabilities. logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java index 4fb1965..ef15823 100644 --- a/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java @@ -52,7 +52,9 @@ public void close() { /** * Run a Hello CosmosDB console application. * - * @param args command line args. + * This sample is similar to SampleCRUDQuickstart, but modified to show indexing capabilities of Cosmos DB. + * Look at the implementation of createContainerIfNotExistsWithSpecifiedIndex() for the demonstration of + * indexing capabilities. */ //
public static void main(String[] args) { @@ -75,9 +77,6 @@ public static void main(String[] args) { //
private void indexManagementDemo() throws Exception { - //This sample is similar to SampleCRUDQuickstart, but modified to show indexing capabilities of Cosmos DB. - //Look at the implementation of createContainerIfNotExistsWithSpecifiedIndex() for the demonstration of - //indexing capabilities. logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); diff --git a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java index 8c8cf33..4aa2427 100644 --- a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java @@ -50,9 +50,13 @@ public void close() { } /** - * Run a Hello CosmosDB console application. + * Stored Procedure Example + * + * This sample code demonstrates creation, execution, and effects of stored procedures + * using Java SDK. A stored procedure is created which will insert a JSON object into + * a Cosmos DB container. The sample executes the stored procedure and then performs + * a point-read to confirm that the stored procedure had the intended effect. * - * @param args command line args. */ //
public static void main(String[] args) { diff --git a/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java index 5716608..55331f2 100644 --- a/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java @@ -45,9 +45,13 @@ public void close() { } /** - * Run a Hello CosmosDB console application. + * Stored Procedure Example + * + * This sample code demonstrates creation, execution, and effects of stored procedures + * using Java SDK. A stored procedure is created which will insert a JSON object into + * a Cosmos DB container. The sample executes the stored procedure and then performs + * a point-read to confirm that the stored procedure had the intended effect. * - * @param args command line args. */ //
public static void main(String[] args) { From 00f514eb241e4872645b2a7774ce3d275feba5f8 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 10 Mar 2020 13:58:39 -0700 Subject: [PATCH 042/110] Cleaned up code --- .../cosmos/examples/changefeed/SampleChangeFeedProcessor.java | 2 +- .../crudquickstart/async/SampleCRUDQuickstartAsync.java | 2 +- .../examples/crudquickstart/sync/SampleCRUDQuickstart.java | 2 +- .../indexmanagement/async/SampleIndexManagementAsync.java | 2 +- .../examples/indexmanagement/sync/SampleIndexManagement.java | 2 +- .../storedprocedure/async/SampleStoredProcedureAsync.java | 3 +-- .../examples/storedprocedure/sync/SampleStoredProcedure.java | 3 +-- 7 files changed, 7 insertions(+), 9 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java index 48000b6..5c07fea 100644 --- a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java @@ -34,7 +34,7 @@ * and "lease" access to monitor one or more partitions of the feed container. The Change Feed Processor Library * handles leasing automatically for you, however you must create a separate "lease container" where the Change Feed * Processor Library can store and track leases container partitions. 
-*/ + */ public class SampleChangeFeedProcessor { public static int WAIT_FOR_WORK = 60000; diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java index 5e67558..379ee18 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java @@ -50,7 +50,7 @@ public void close() { /** * Run a Hello CosmosDB console application. - * + *

* This is a simple sample application intended to demonstrate Create, Read, Update, Delete (CRUD) operations * with Azure Cosmos DB Java SDK, as applied to databases, containers and items. This sample will * 1. Create asynchronous client, database and container instances diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java index cd029ec..0659d25 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java @@ -47,7 +47,7 @@ public void close() { /** * Run a Hello CosmosDB console application. - * + *

* This is a simple sample application intended to demonstrate Create, Read, Update, Delete (CRUD) operations * with Azure Cosmos DB Java SDK, as applied to databases, containers and items. This sample will * 1. Create synchronous client, database and container instances diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java index 9eeac8c..34628ff 100644 --- a/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java @@ -55,7 +55,7 @@ public void close() { /** * Run a Hello CosmosDB console application. - * + *

* This sample is similar to SampleCRUDQuickstartAsync, but modified to show indexing capabilities of Cosmos DB. * Look at the implementation of createContainerIfNotExistsWithSpecifiedIndex() for the demonstration of * indexing capabilities. diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java index ef15823..748dbfe 100644 --- a/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java @@ -51,7 +51,7 @@ public void close() { /** * Run a Hello CosmosDB console application. - * + *

* This sample is similar to SampleCRUDQuickstart, but modified to show indexing capabilities of Cosmos DB. * Look at the implementation of createContainerIfNotExistsWithSpecifiedIndex() for the demonstration of * indexing capabilities. diff --git a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java index 4aa2427..21ca683 100644 --- a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java @@ -51,12 +51,11 @@ public void close() { /** * Stored Procedure Example - * + *

* This sample code demonstrates creation, execution, and effects of stored procedures * using Java SDK. A stored procedure is created which will insert a JSON object into * a Cosmos DB container. The sample executes the stored procedure and then performs * a point-read to confirm that the stored procedure had the intended effect. - * */ //

public static void main(String[] args) { diff --git a/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java index 55331f2..2061c5a 100644 --- a/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java @@ -46,12 +46,11 @@ public void close() { /** * Stored Procedure Example - * + *

* This sample code demonstrates creation, execution, and effects of stored procedures * using Java SDK. A stored procedure is created which will insert a JSON object into * a Cosmos DB container. The sample executes the stored procedure and then performs * a point-read to confirm that the stored procedure had the intended effect. - * */ //

public static void main(String[] args) { From 75caa497e7bcd1b176e9d6bf901761c610ee6c97 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 10 Mar 2020 14:51:45 -0700 Subject: [PATCH 043/110] Added Reactor Pattern Guide --- reactor-pattern-guide.md | 78 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 reactor-pattern-guide.md diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md new file mode 100644 index 0000000..2b21721 --- /dev/null +++ b/reactor-pattern-guide.md @@ -0,0 +1,78 @@ +# Guide to Reactive Programming Programming and Reactor Design Patterns with Azure Cosmos DB Java SDK + +## Introduction + +The purpose of this document is to enable +* users with RxJava experience (i.e. from working with Cosmos DB Async Java SDK v2.x.x) +* and users who are only familiar with the Cosmos DB Legacy Sync SDK + +to get started programming with the **Cosmos DB Async Java SDK v3.x.x, v4.x.x and above** by providing background, Reactive Streams programming guidelines, and one-to-one use-case comparison between Project Reactive and RxJava. For Async Java SDK v3.x.x and above, non-blocking requests are implemented using [Project Reactive](https://projectreactor.io/), superseding RxJava which was used in the Async Java SDK v2.x.x. + +**Overview of Asynchronous Library by Cosmos DB Java Async SDK Version** +| Java Async SDK Version | Async Library | +| :--------------------: | :--------------: | +| 1.x.x | RxJava | +| 2.x.x | RxJava | +| 3.x.x | Project Reactive | +| 4.x.x | Project Reactive | + + + +## Background: Reactive Programming, Reactive Streams, Reactor, Rx Java, and Project Reactive + +### 1. ***Reactive Programming and Standards*** + +Reactive Programming is a declarative programming paradigm in which program operation and control flow are described as a stream of data items passing through a pipeline of operations in which each operation affects the data which flows downstream. 
+ +**Imperative programming** is the more common or "familiar" programming paradigm in which program operation and control flow are expressed by sequential commands which manipulate program state (variables). A simple imperative program in pseudocode is + + If input data available, read into variable x + Do operation1 on variable x + Then do operation2 on variable y + Then do operation3 on variable z + And then print the result + +Reactive Programming is a **declarative** paradigm - specifically a **dataflow** paradigm - in which the programmer must describe a directed graph of operations which represents the logic of the program. A simple declarative dataflow representation of the above program in pseudocode is: + + asynchronous data source => operation1 => operation2 => operation3 => print + +How this differs from imperative programming, is that the coder is describing the high-level process of execution but letting the language implementation decide when and how to implement these operations. This is exemplified by the concept of *back-pressure* which is baked into some implementations of Reactive Programming. Back-pressure essentially rate-limits dataflow in a Reactive Stream based on the slowest pipelined operation. An imperative implementation of back-pressure would require the programmer to describe a complicated flow-control process - whereas in a declarative dataflow language with back-pressure, the programmer specifies the directed graph of pipelined operations while the language handles scheduling of operations at the implementation level to ensure that no operation receives data faster than it can process. + +[Reactive Streams](http://www.reactive-streams.org/) is an industry standard for declarative dataflow programming in an asynchronous environment. More detail on design principles can be found in the [Reactive Manifesto](https://www.reactivemanifesto.org/). 
It is the basis for the asynchronous programming libraries which have been used in the Cosmos DB Async Java SDKs. + +### 2. ***Reactive Streams Implementations*** +[RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](reactivex.io/) for JVM) is no longer being used after Java SDK v2.x.x. + +[Project Reactor](https://projectreactor.io/) or just *Reactor* is the Reactive Programming framework used in Java SDK v3.x.x and above. + +The purpose of the rest of this document is to help you start using Reactor with as little trouble as possible. This includes suggestions for upgrading your code from RxJava to Reactor and also Reactor design pattern guidelines. + +## Getting started quickly with Reactor + +### 1. ***At a Glance: RxJava vs Project Reactive: Design Pattern Comparison*** + +### 2. ***Design Patterns with Project Reactive: Building Publish-Subscribe Pipelines*** + +# => Functionality + +# => Terminology + +# Diagrams + +# Tips and Tricks + +# Troubleshooting + +# Hazards + +C10K problem + +# Examples + +# 5. For More Information + +* If you would like to learn more about Project Reactor and Reactive Streams, or get started writing code using Reactor, you can visit [the Project Reactor website.](https://projectreactor.io/) + +* [A gentle introduction to Reactor from tech.io](https://tech.io/playgrounds/929/reactive-programming-with-reactor-3/Intro) + +* Reactive Extensions for the JVM (RxJava), a project of ReactiveX **which is no longer used by Cosmos DB** but was previously used to facilitate non-blocking access in Async Java SDK v2.x.x and below. 
\ No newline at end of file From 1b1f64e71f1579254f25c49076d162706224cc57 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 10 Mar 2020 16:24:12 -0700 Subject: [PATCH 044/110] Simple Reactive Stream example --- reactor-pattern-guide.md | 61 +++++++++++++++++++++++++++++++--------- 1 file changed, 48 insertions(+), 13 deletions(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 2b21721..52ff83d 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -40,36 +40,71 @@ How this differs from imperative programming, is that the coder is describing th [Reactive Streams](http://www.reactive-streams.org/) is an industry standard for declarative dataflow programming in an asynchronous environment. More detail on design principles can be found in the [Reactive Manifesto](https://www.reactivemanifesto.org/). It is the basis for the asynchronous programming libraries which have been used in the Cosmos DB Async Java SDKs. -### 2. ***Reactive Streams Implementations*** +### 2. ***Available Reactive Streams Frameworks for Java/JVM*** [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](reactivex.io/) for JVM) is no longer being used after Java SDK v2.x.x. [Project Reactor](https://projectreactor.io/) or just *Reactor* is the Reactive Programming framework used in Java SDK v3.x.x and above. The purpose of the rest of this document is to help you start using Reactor with as little trouble as possible. This includes suggestions for upgrading your code from RxJava to Reactor and also Reactor design pattern guidelines. -## Getting started quickly with Reactor +## Reactor Design Patterns -### 1. ***At a Glance: RxJava vs Project Reactive: Design Pattern Comparison*** +To write a program using Reactor, you will need to describe one or more Reactive Streams. 
In typical uses of Reactor, you describe a stream +by (1) creating a *Publisher* (which originates data asynchronously) and a *Subscriber* (which consumes data and operates on it asynchronously), and (2) +describing a pipeline from Publisher to Subscriber, in which the data from Publisher is transformed at each pipeline stage before eventually +ending in Subscriber. In this section we will discuss this process in more detail and demonstrate how Reactor lets you define the transforming operation at each +pipeline stage. -### 2. ***Design Patterns with Project Reactive: Building Publish-Subscribe Pipelines*** +### 1. ***Describing a Reactive Stream (A Publisher-Subscriber Pipeline)*** -# => Functionality +Here is a simple Reactive Stream: -# => Terminology +```java +Flux.just("Hello","Cosmos DB") + .subscribe(System.out::println); +``` -# Diagrams +The Publisher is ``` Flux.just("Hello","Cosmos DB") ```. ```Flux.just()``` is a *Reactor factory method* which allows you to define a Publisher. +``` Flux.just("Hello","Cosmos DB") ``` will asynchronously send ```Hello``` and ```Cosmos DB``` as two Strings to the next stage of the Publisher-Subscriber +pipeline. -# Tips and Tricks +Here, the Publisher-Subscriber pipeline is simple - the next pipeline stage after the Publisher is the Subscriber, ```.subscribe(System.out::println)```, which +will receive the two Strings as they arrive from upstream and process them by applying ```System.out::println``` to each one, again asynchronously. +The output would be -# Troubleshooting +```java +Hello +Cosmos DB +``` -# Hazards +This is a simple Publisher-Subscriber pipeline with no operations to transform the data. +The call to ```subscribe()``` is what ultimately triggers data to flow through the Reactive Stream +and carry out the logic of your program. 
Simply calling -C10K problem +```java +Flux.just("Hello","Cosmos DB"); +``` -# Examples +without calling ```subscribe()``` will **not** execute the logic of your program; this line will simply return a ```Flux``` which represents +the pipeline of operations starting from the Publisher (which in this case, consists only of the Publisher). This ```Flux``` can be stored in a +variable and used like any other variable. For example you can return its value and use that value elsewhere in the program, i.e. by subscribing to it in another function: -# 5. For More Information +```java +private Flux some_function() { + return Flux.just("Hello","Cosmos DB"); +} + +public void calling_function() { + Flux str_flux = some_function(); //Returns a representation of a Reactive Stream + str_flux.subscribe(System.out::println); //Produces the same output as the original example, by subscribing to the Reactive Stream +} +``` + + +## Reactor vs RxJava + + +## For More Information * If you would like to learn more about Project Reactor and Reactive Streams, or get started writing code using Reactor, you can visit [the Project Reactor website.](https://projectreactor.io/) From dfeda9254c339936c65fbd0dce3c9d59cd046c33 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 10 Mar 2020 16:37:51 -0700 Subject: [PATCH 045/110] Outlined what I plan to write about --- reactor-pattern-guide.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 52ff83d..5a47d37 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -100,9 +100,22 @@ public void calling_function() { } ``` +This approach of defining Reactive Streams and then subscribing to them later can be useful - **just remember that the logic of your Reactive Stream will +not be executed until you ```subscribe()``` to it.** + +TODO: +* Introduce ```Mono``` and ```Flux``` +* Reactor Factory Methods +* Operations to transform data - ```flatMap()```, 
```reduce()```, nested imperative code +* More about ```subscribe()``` and ```onNext()```/```onComplete```/```onError``` and ```block()``` +* ```subscribeOn()```, ```publishOn```, schedulers +* ```onSuccess()``` ## Reactor vs RxJava +* Compare all of the above between Reactor and RxJava +* rxJava observeOn <-> reactive stream publishOn +* rxJava subscribeOn <-> reactive stream subscribeOn ## For More Information From ccc8d13c71e9ddd581be41161123a20de65e0388 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 13 Mar 2020 09:05:01 -0700 Subject: [PATCH 046/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 5a47d37..7fbbf6b 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -1,23 +1,5 @@ # Guide to Reactive Programming Programming and Reactor Design Patterns with Azure Cosmos DB Java SDK -## Introduction - -The purpose of this document is to enable -* users with RxJava experience (i.e. from working with Cosmos DB Async Java SDK v2.x.x) -* and users who are only familiar with the Cosmos DB Legacy Sync SDK - -to get started programming with the **Cosmos DB Async Java SDK v3.x.x, v4.x.x and above** by providing background, Reactive Streams programming guidelines, and one-to-one use-case comparison between Project Reactive and RxJava. For Async Java SDK v3.x.x and above, non-blocking requests are implemented using [Project Reactive](https://projectreactor.io/), superseding RxJava which was used in the Async Java SDK v2.x.x. 
- -**Overview of Asynchronous Library by Cosmos DB Java Async SDK Version** -| Java Async SDK Version | Async Library | -| :--------------------: | :--------------: | -| 1.x.x | RxJava | -| 2.x.x | RxJava | -| 3.x.x | Project Reactive | -| 4.x.x | Project Reactive | - - - ## Background: Reactive Programming, Reactive Streams, Reactor, Rx Java, and Project Reactive ### 1. ***Reactive Programming and Standards*** @@ -123,4 +105,4 @@ TODO: * [A gentle introduction to Reactor from tech.io](https://tech.io/playgrounds/929/reactive-programming-with-reactor-3/Intro) -* Reactive Extensions for the JVM (RxJava), a project of ReactiveX **which is no longer used by Cosmos DB** but was previously used to facilitate non-blocking access in Async Java SDK v2.x.x and below. \ No newline at end of file +* Reactive Extensions for the JVM (RxJava), a project of ReactiveX **which is no longer used by Cosmos DB** but was previously used to facilitate non-blocking access in Async Java SDK v2.x.x and below. From e2ecdc624502b77dd8f950978e1542df7761e20a Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 13 Mar 2020 09:06:07 -0700 Subject: [PATCH 047/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 7fbbf6b..f89a2d2 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -1,4 +1,4 @@ -# Guide to Reactive Programming Programming and Reactor Design Patterns with Azure Cosmos DB Java SDK +# Reactive Pattern Guide: A Guide for Reactive Programming with Reactor ## Background: Reactive Programming, Reactive Streams, Reactor, Rx Java, and Project Reactive From db030a80c4c5e437521bfe669f12b90d627ee870 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 13 Mar 2020 09:06:22 -0700 Subject: [PATCH 048/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/reactor-pattern-guide.md b/reactor-pattern-guide.md index f89a2d2..c88a62c 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -1,4 +1,4 @@ -# Reactive Pattern Guide: A Guide for Reactive Programming with Reactor +# Reactive Pattern Guide ## Background: Reactive Programming, Reactive Streams, Reactor, Rx Java, and Project Reactive From fc5aa92e080f4d13478bd812ecf880596e529f59 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 16 Mar 2020 13:40:44 -0700 Subject: [PATCH 049/110] First attempt at async request throughput sample using Flux --- .../async/SampleRequestThroughputAsync.java | 196 ++++++++++++++++++ .../sync/SampleRequestThroughput.java | 4 + src/main/resources/log4j2.properties | 2 + 3 files changed, 202 insertions(+) create mode 100644 src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java create mode 100644 src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java new file mode 100644 index 0000000..310d34f --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java @@ -0,0 +1,196 @@ +package com.azure.cosmos.examples.requestthroughput.async; + +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.implementation.Utils; +import com.azure.cosmos.models.CosmosAsyncItemResponse; 
+import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.PartitionKey; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Scheduler; +import reactor.core.scheduler.Schedulers; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; + +public class SampleRequestThroughputAsync { + + private static CosmosAsyncClient client; + private static CosmosAsyncDatabase database; + private static CosmosAsyncContainer container; + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public static void main(String[] args) { + try { + requestThroughputDemo(); + } catch(Exception err) { + logger.error("Failed running demo: ", err); + } + } + + public static void requestThroughputDemo() { + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(ConnectionPolicy.getDefaultPolicy()) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildAsyncClient(); + + AtomicBoolean resourcesCreated = new AtomicBoolean(); + resourcesCreated.set(false); + + // This code describes the logic of database and container creation as a reactive stream (but crucially doesn't actually create anything). 
+ Mono databaseContainerIfNotExist = client.createDatabaseIfNotExists("ContosoInventoryDB").flatMap(databaseResponse -> { + database = databaseResponse.getDatabase(); + logger.info("Got DB."); + CosmosContainerProperties containerProperties = new CosmosContainerProperties("ContosoInventoryContainer", "/id"); + return database.createContainerIfNotExists(containerProperties, 400); + }).flatMap(containerResponse -> { + container = containerResponse.getContainer(); + logger.info("Got container."); + return Mono.empty(); + }); + + // This code asynchronously sends a request to create a database and container (returns immediately). + // When the server sends a response back, a flag is set to notify the rest of the program. + logger.info("Creating database and container asynchronously..."); + databaseContainerIfNotExist.subscribe(voidItem -> {}, err -> {}, + () -> { + logger.info("Finished creating resources.\n\n"); + resourcesCreated.set(true); + }); + + // Async resource creation frees our application to do other things in the meantime :) + logger.info("Doing other things while resources are being created..."); + while (!resourcesCreated.get()) doOtherThings(); + + // And we pick up our database and container when they are ready. 
+ logger.info("Inserting 10 documents..."); + + AtomicBoolean docsInserted = new AtomicBoolean(); + docsInserted.set(false); + + ExecutorService ex = Executors.newFixedThreadPool(30); + Scheduler customScheduler = Schedulers.fromExecutor(ex); + + int number_of_docs = 10; + Flux.fromIterable(generateDocs(number_of_docs)) //Publisher + .subscribeOn(customScheduler) + .flatMap(doc -> { + logger.info("Sending request..."); + // Stream operation 1: insert doc into container + return container.createItem(doc); + }) + .delayElements(Duration.ofSeconds(1)) + .map(itemResponse -> { + // Stream operation 2: simulated network response time + //simulateNetworkResponseTime(); + + return itemResponse; + }) + .flatMap(itemResponse -> { + // Stream operation 3: print item response + logger.info(String.format("Inserted item with request charge of %.2f within duration %s", + itemResponse.getRequestCharge(), itemResponse.getRequestLatency())); + return Mono.empty(); + }) + .subscribe(voidItem -> {}, err -> {}, + () -> { + logger.info("Finished inserting {} documents.",number_of_docs); + docsInserted.set(true); + } + ); + + logger.info("Doing other things while docs are being inserted..."); + while (!docsInserted.get()) doOtherThings(); + + logger.info("Deleting resources."); + + AtomicBoolean resourcesDeleted = new AtomicBoolean(); + resourcesDeleted.set(false); + + container.delete() + .flatMap(containerResponse -> database.delete()) + .subscribe(dbItem -> {}, err -> {}, + () -> { + logger.info("Finished deleting resources."); + resourcesDeleted.set(true); + }); + + logger.info("Doing other things while deleting resources..."); + while (!resourcesDeleted.get()) doOtherThings(); + + logger.info("Closing client..."); + + client.close(); + + logger.info("Done with demo."); + + } + + /* Intentionally exaggerated 2sec network response time for requests */ + private static void simulateNetworkResponseTime() { + try { + Thread.sleep(1000); + } catch (Exception err) { + 
logger.error("Simulated response time failed: ",err); + } + } + + /* Placeholder for background tasks to run during resource creation */ + private static void doOtherThings() { + // Not much to do right now :) + } + + /* Delete the resources created for this example. */ + private static void cleanup() { + if (container != null) + logger.info("Deleting container..."); + container.delete(); + + if (database != null) + logger.info("Deleting database..."); + database.delete(); + + client.close(); + } + + /* Generate ArrayList of N unique documents (assumes /pk is id) */ + private static ArrayList generateDocs(int N) { + ArrayList docs = new ArrayList(); + ObjectMapper mapper = Utils.getSimpleObjectMapper(); + + try { + for (int i = 1; i <= N; i++) { + docs.add(mapper.readTree( + "{" + + "\"id\": " + + "\"" + System.currentTimeMillis() + "\"" + + "}" + )); + + Thread.sleep(2*i); // Unique ids w/ nonuniform spacing + } + } catch (Exception err) { + logger.error("Failed generating documents: ", err); + } + + return docs; + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java new file mode 100644 index 0000000..25c893d --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java @@ -0,0 +1,4 @@ +package com.azure.cosmos.examples.requestthroughput.sync; + +public class SampleRequestThroughput { +} diff --git a/src/main/resources/log4j2.properties b/src/main/resources/log4j2.properties index f1178ab..a92d123 100644 --- a/src/main/resources/log4j2.properties +++ b/src/main/resources/log4j2.properties @@ -1,5 +1,7 @@ # this is the log4j configuration for tests # Set root logger level to WARN and its appender to STDOUT. 
+filter.threshold.type = ThresholdFilter +filter.threshold.level = DEBUG rootLogger.level=INFO rootLogger.appenderRef.stdout.ref=STDOUT logger.netty.name=io.netty From 5c66caef141acaf29410c5ea397b8545ff293c98 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 16 Mar 2020 14:04:00 -0700 Subject: [PATCH 050/110] Different structure, Mono-based --- .../async/SampleRequestThroughputAsync.java | 55 +++++++++---------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java index 310d34f..f6481ae 100644 --- a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java @@ -27,6 +27,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; public class SampleRequestThroughputAsync { @@ -89,35 +90,33 @@ public static void requestThroughputDemo() { Scheduler customScheduler = Schedulers.fromExecutor(ex); int number_of_docs = 10; - Flux.fromIterable(generateDocs(number_of_docs)) //Publisher - .subscribeOn(customScheduler) - .flatMap(doc -> { - logger.info("Sending request..."); - // Stream operation 1: insert doc into container - return container.createItem(doc); - }) - .delayElements(Duration.ofSeconds(1)) - .map(itemResponse -> { - // Stream operation 2: simulated network response time - //simulateNetworkResponseTime(); - - return itemResponse; - }) - .flatMap(itemResponse -> { - // Stream operation 3: print item response - logger.info(String.format("Inserted item with request charge of %.2f within duration %s", - itemResponse.getRequestCharge(), itemResponse.getRequestLatency())); - return 
Mono.empty(); - }) - .subscribe(voidItem -> {}, err -> {}, - () -> { - logger.info("Finished inserting {} documents.",number_of_docs); - docsInserted.set(true); - } - ); + + AtomicInteger numberOfDocsInserted = new AtomicInteger(0); + + generateDocs(number_of_docs).forEach(doc -> { + // Insert 10 docs into Cosmos DB + logger.info("Sending request..."); + + // Publisher: createItem inserts a doc & publishes request response... + container.createItem(doc) + .flatMap(itemResponse -> { + // ...Streaming operation: lambda prints request response... + logger.info(String.format("Inserted item (return code %d) with request charge of %.2f within duration %s", + itemResponse.getStatusCode(), itemResponse.getRequestCharge(), itemResponse.getRequestLatency())); + return Mono.empty(); + }).subscribe(voidItem->{}, err->{}, () -> { + // ...Subscribing to the publisher triggers execution. + // Subscriber onComplete() lambda increments throughput counter + numberOfDocsInserted.incrementAndGet(); + }); + }); logger.info("Doing other things while docs are being inserted..."); - while (!docsInserted.get()) doOtherThings(); + while (numberOfDocsInserted.get() < number_of_docs) doOtherThings(); + + logger.info("Done."); + + /* logger.info("Deleting resources."); @@ -140,7 +139,7 @@ public static void requestThroughputDemo() { client.close(); logger.info("Done with demo."); - + */ } /* Intentionally exaggerated 2sec network response time for requests */ From b1a9a1e55515b4d630f44e36ab34e8d5bedff6f7 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 16 Mar 2020 20:59:12 -0700 Subject: [PATCH 051/110] Working sync and async request throughput samples --- .../azure/cosmos/examples/common/Profile.java | 52 ++++++ .../async/SampleRequestThroughputAsync.java | 161 +++++------------- .../sync/SampleRequestThroughput.java | 79 +++++++++ 3 files changed, 175 insertions(+), 117 deletions(-) create mode 100644 src/main/java/com/azure/cosmos/examples/common/Profile.java diff --git 
a/src/main/java/com/azure/cosmos/examples/common/Profile.java b/src/main/java/com/azure/cosmos/examples/common/Profile.java new file mode 100644 index 0000000..8471fde --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Profile.java @@ -0,0 +1,52 @@ +package com.azure.cosmos.examples.common; + +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.implementation.Utils; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.UUID; + +public class Profile { + + private static long tic_ns = System.nanoTime(); // For execution timing + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + /* tic/toc pair - measure ms execution time between tic() and toc_ms() + Undefined behavior if you you do not pair 'tic()' followed by 'toc_ms()' + */ + public static void tic() {tic_ns = System.nanoTime();} + public static double toc_ms() {return ((double)(System.nanoTime()-tic_ns))/1000000.0;}; + + /* Generate ArrayList of N unique documents (assumes /pk is id) */ + public static ArrayList generateDocs(int N) { + ArrayList docs = new ArrayList(); + ObjectMapper mapper = Utils.getSimpleObjectMapper(); + + try { + for (int i = 1; i <= N; i++) { + docs.add(mapper.readTree( + "{" + + "\"id\": " + + "\"" + UUID.randomUUID().toString() + "\"" + + "}" + )); + + + } + } catch (Exception err) { + logger.error("Failed generating documents: ", err); + } + + return docs; + } + + /* Placeholder for background tasks to run during resource creation */ + public static void doOtherThings() { + // Not much to do right now :) + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java 
b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java index f6481ae..975299d 100644 --- a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java @@ -6,35 +6,21 @@ import com.azure.cosmos.CosmosAsyncContainer; import com.azure.cosmos.CosmosAsyncDatabase; import com.azure.cosmos.CosmosClientBuilder; -import com.azure.cosmos.CosmosClientException; -import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; import com.azure.cosmos.examples.common.AccountSettings; -import com.azure.cosmos.implementation.Utils; -import com.azure.cosmos.models.CosmosAsyncItemResponse; +import com.azure.cosmos.examples.common.Profile; import com.azure.cosmos.models.CosmosContainerProperties; -import com.azure.cosmos.models.PartitionKey; import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; -import reactor.core.scheduler.Scheduler; -import reactor.core.scheduler.Schedulers; -import java.time.Duration; import java.util.ArrayList; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; public class SampleRequestThroughputAsync { - private static CosmosAsyncClient client; - private static CosmosAsyncDatabase database; - private static CosmosAsyncContainer container; - protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + protected static Logger logger = LoggerFactory.getLogger(SampleRequestThroughputAsync.class.getSimpleName()); public static void main(String[] args) { try { @@ -44,7 +30,18 @@ public static void main(String[] args) { } } 
+ private static CosmosAsyncClient client; + private static CosmosAsyncDatabase database; + private static CosmosAsyncContainer container; + private static AtomicBoolean resourcesCreated = new AtomicBoolean(false); + private static AtomicInteger numberOfDocsInserted = new AtomicInteger(0); + private static AtomicBoolean resourcesDeleted = new AtomicBoolean(false); + public static void requestThroughputDemo() { + ConnectionPolicy my_connection_policy = ConnectionPolicy.getDefaultPolicy(); + + // Create Async client. + // Building an async client is still a sync operation. client = new CosmosClientBuilder() .setEndpoint(AccountSettings.HOST) .setKey(AccountSettings.MASTER_KEY) @@ -52,23 +49,20 @@ public static void requestThroughputDemo() { .setConsistencyLevel(ConsistencyLevel.EVENTUAL) .buildAsyncClient(); - AtomicBoolean resourcesCreated = new AtomicBoolean(); - resourcesCreated.set(false); - - // This code describes the logic of database and container creation as a reactive stream (but crucially doesn't actually create anything). + // Describe the logic of database and container creation using Reactor... Mono databaseContainerIfNotExist = client.createDatabaseIfNotExists("ContosoInventoryDB").flatMap(databaseResponse -> { database = databaseResponse.getDatabase(); logger.info("Got DB."); CosmosContainerProperties containerProperties = new CosmosContainerProperties("ContosoInventoryContainer", "/id"); - return database.createContainerIfNotExists(containerProperties, 400); + return database.createContainerIfNotExists(containerProperties, 100000); }).flatMap(containerResponse -> { container = containerResponse.getContainer(); logger.info("Got container."); return Mono.empty(); }); - // This code asynchronously sends a request to create a database and container (returns immediately). - // When the server sends a response back, a flag is set to notify the rest of the program. + // ...it doesn't execute until you subscribe(). + // The async call returns immediately... 
logger.info("Creating database and container asynchronously..."); databaseContainerIfNotExist.subscribe(voidItem -> {}, err -> {}, () -> { @@ -76,53 +70,38 @@ public static void requestThroughputDemo() { resourcesCreated.set(true); }); - // Async resource creation frees our application to do other things in the meantime :) - logger.info("Doing other things while resources are being created..."); - while (!resourcesCreated.get()) doOtherThings(); - - // And we pick up our database and container when they are ready. - logger.info("Inserting 10 documents..."); + // ...so we can do other things until async response arrives! + logger.info("Doing other things until async resource creation completes......"); + while (!resourcesCreated.get()) Profile.doOtherThings(); - AtomicBoolean docsInserted = new AtomicBoolean(); - docsInserted.set(false); + // Container is created. Generate many docs to insert. + int number_of_docs = 4000000; + logger.info("Generating {} documents...", number_of_docs); + ArrayList docs = Profile.generateDocs(number_of_docs); - ExecutorService ex = Executors.newFixedThreadPool(30); - Scheduler customScheduler = Schedulers.fromExecutor(ex); - - int number_of_docs = 10; - - AtomicInteger numberOfDocsInserted = new AtomicInteger(0); - - generateDocs(number_of_docs).forEach(doc -> { - // Insert 10 docs into Cosmos DB - logger.info("Sending request..."); - - // Publisher: createItem inserts a doc & publishes request response... + // Insert many docs into container... + logger.info("Inserting {} documents...", number_of_docs); + docs.forEach(doc -> { + // ...by describing logic of item insertion using Reactor. Then subscribe() to execute. container.createItem(doc) + // ^Publisher: upon subscription, createItem inserts a doc & + // publishes request response to the next operation... .flatMap(itemResponse -> { - // ...Streaming operation: lambda prints request response... 
- logger.info(String.format("Inserted item (return code %d) with request charge of %.2f within duration %s", - itemResponse.getStatusCode(), itemResponse.getRequestCharge(), itemResponse.getRequestLatency())); + // ...Streaming operation: count each doc & check success... + if (itemResponse.getStatusCode() == 201) + numberOfDocsInserted.getAndIncrement(); + else + logger.warn("WARNING insert status code {} != 201", itemResponse.getStatusCode()); return Mono.empty(); - }).subscribe(voidItem->{}, err->{}, () -> { - // ...Subscribing to the publisher triggers execution. - // Subscriber onComplete() lambda increments throughput counter - numberOfDocsInserted.incrementAndGet(); - }); + }).subscribe(); // ...Subscribing to the publisher triggers stream execution. }); - logger.info("Doing other things while docs are being inserted..."); - while (numberOfDocsInserted.get() < number_of_docs) doOtherThings(); - - logger.info("Done."); - - /* + // Do other things until async response arrives + logger.info("Doing other things until async doc inserts complete..."); + while (numberOfDocsInserted.get() < number_of_docs) Profile.doOtherThings(); + // Inserts are complete. Cleanup (asynchronously!) logger.info("Deleting resources."); - - AtomicBoolean resourcesDeleted = new AtomicBoolean(); - resourcesDeleted.set(false); - container.delete() .flatMap(containerResponse -> database.delete()) .subscribe(dbItem -> {}, err -> {}, @@ -131,65 +110,13 @@ public static void requestThroughputDemo() { resourcesDeleted.set(true); }); - logger.info("Doing other things while deleting resources..."); - while (!resourcesDeleted.get()) doOtherThings(); + // Do other things until async response arrives + logger.info("Do other things until async resource delete completes..."); + while (!resourcesDeleted.get()) Profile.doOtherThings(); + // Close client. This is always sync. 
logger.info("Closing client..."); - client.close(); - logger.info("Done with demo."); - */ - } - - /* Intentionally exaggerated 2sec network response time for requests */ - private static void simulateNetworkResponseTime() { - try { - Thread.sleep(1000); - } catch (Exception err) { - logger.error("Simulated response time failed: ",err); - } } - - /* Placeholder for background tasks to run during resource creation */ - private static void doOtherThings() { - // Not much to do right now :) - } - - /* Delete the resources created for this example. */ - private static void cleanup() { - if (container != null) - logger.info("Deleting container..."); - container.delete(); - - if (database != null) - logger.info("Deleting database..."); - database.delete(); - - client.close(); - } - - /* Generate ArrayList of N unique documents (assumes /pk is id) */ - private static ArrayList generateDocs(int N) { - ArrayList docs = new ArrayList(); - ObjectMapper mapper = Utils.getSimpleObjectMapper(); - - try { - for (int i = 1; i <= N; i++) { - docs.add(mapper.readTree( - "{" + - "\"id\": " + - "\"" + System.currentTimeMillis() + "\"" + - "}" - )); - - Thread.sleep(2*i); // Unique ids w/ nonuniform spacing - } - } catch (Exception err) { - logger.error("Failed generating documents: ", err); - } - - return docs; - } - } diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java index 25c893d..b6345ff 100644 --- a/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java @@ -1,4 +1,83 @@ package com.azure.cosmos.examples.requestthroughput.sync; + +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; 
+import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.Profile; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosItemResponse; +import com.fasterxml.jackson.databind.JsonNode; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; + public class SampleRequestThroughput { + + protected static Logger logger = LoggerFactory.getLogger(SampleRequestThroughput.class.getSimpleName()); + + public static void main(String[] args) { + try { + requestThroughputDemo(); + } catch(Exception err) { + logger.error("Failed running demo: ", err); + } + } + + private static CosmosClient client; + private static CosmosDatabase database; + private static CosmosContainer container; + + public static void requestThroughputDemo() { + ConnectionPolicy my_connection_policy = ConnectionPolicy.getDefaultPolicy(); + + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(ConnectionPolicy.getDefaultPolicy()) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + // This code synchronously sends a request to create a database. + // While the client waits for a response, this thread is blocked from + // performing other tasks. + database = client.createDatabaseIfNotExists("ContosoInventoryDB").getDatabase(); + logger.info("Got DB."); + CosmosContainerProperties containerProperties = new CosmosContainerProperties("ContosoInventoryContainer", "/id"); + container = database.createContainerIfNotExists(containerProperties, 100000).getContainer(); + logger.info("Got container."); + // Resources are ready. 
+ // + // Create many docs to insert into the container + int number_of_docs = 4000000; + logger.info("Generating {} documents...", number_of_docs); + ArrayList docs = Profile.generateDocs(number_of_docs); + logger.info("Inserting {} documents...", number_of_docs); + + // Insert many docs synchronously. + // The client blocks waiting for a response to each insert request, + // which limits throughput. + // While the client is waiting for a response, the thread is blocked from other tasks + docs.forEach(doc -> { + CosmosItemResponse itemResponse = container.createItem(doc); + if (itemResponse.getStatusCode() != 201) + logger.warn("WARNING insert status code {} != 201", itemResponse.getStatusCode()); + }); + + // Clean up + logger.info("Deleting resources."); + container.delete(); + database.delete(); + logger.info("Finished deleting resources."); + + logger.info("Closing client..."); + client.close(); + + logger.info("Done with demo."); + } } From 96aac3313d2b7e97b7139e22f3be4746add79a50 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 17 Mar 2020 03:19:46 -0700 Subject: [PATCH 052/110] Essentially finished Request Throughput Samples --- .../async/SampleRequestThroughputAsync.java | 25 +++++++++++-------- .../sync/SampleRequestThroughput.java | 2 +- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java index 975299d..395af50 100644 --- a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java @@ -33,9 +33,9 @@ public static void main(String[] args) { private static CosmosAsyncClient client; private static CosmosAsyncDatabase database; private static CosmosAsyncContainer container; - private static AtomicBoolean 
resourcesCreated = new AtomicBoolean(false); - private static AtomicInteger numberOfDocsInserted = new AtomicInteger(0); - private static AtomicBoolean resourcesDeleted = new AtomicBoolean(false); + private static AtomicBoolean resources_created = new AtomicBoolean(false); + private static AtomicInteger number_docs_inserted = new AtomicInteger(0); + private static AtomicBoolean resources_deleted = new AtomicBoolean(false); public static void requestThroughputDemo() { ConnectionPolicy my_connection_policy = ConnectionPolicy.getDefaultPolicy(); @@ -54,7 +54,7 @@ public static void requestThroughputDemo() { database = databaseResponse.getDatabase(); logger.info("Got DB."); CosmosContainerProperties containerProperties = new CosmosContainerProperties("ContosoInventoryContainer", "/id"); - return database.createContainerIfNotExists(containerProperties, 100000); + return database.createContainerIfNotExists(containerProperties, 400); }).flatMap(containerResponse -> { container = containerResponse.getContainer(); logger.info("Got container."); @@ -67,12 +67,12 @@ public static void requestThroughputDemo() { databaseContainerIfNotExist.subscribe(voidItem -> {}, err -> {}, () -> { logger.info("Finished creating resources.\n\n"); - resourcesCreated.set(true); + resources_created.set(true); }); // ...so we can do other things until async response arrives! logger.info("Doing other things until async resource creation completes......"); - while (!resourcesCreated.get()) Profile.doOtherThings(); + while (!resources_created.get()) Profile.doOtherThings(); // Container is created. Generate many docs to insert. int number_of_docs = 4000000; @@ -82,6 +82,11 @@ public static void requestThroughputDemo() { // Insert many docs into container... 
logger.info("Inserting {} documents...", number_of_docs); docs.forEach(doc -> { + try { + Thread.sleep(12); + } catch (Exception err) { + logger.error("Error throttling programmatically: ",err); + } // ...by describing logic of item insertion using Reactor. Then subscribe() to execute. container.createItem(doc) // ^Publisher: upon subscription, createItem inserts a doc & @@ -89,7 +94,7 @@ public static void requestThroughputDemo() { .flatMap(itemResponse -> { // ...Streaming operation: count each doc & check success... if (itemResponse.getStatusCode() == 201) - numberOfDocsInserted.getAndIncrement(); + number_docs_inserted.getAndIncrement(); else logger.warn("WARNING insert status code {} != 201", itemResponse.getStatusCode()); return Mono.empty(); @@ -98,7 +103,7 @@ public static void requestThroughputDemo() { // Do other things until async response arrives logger.info("Doing other things until async doc inserts complete..."); - while (numberOfDocsInserted.get() < number_of_docs) Profile.doOtherThings(); + while (number_docs_inserted.get() < number_of_docs) Profile.doOtherThings(); // Inserts are complete. Cleanup (asynchronously!) logger.info("Deleting resources."); @@ -107,12 +112,12 @@ public static void requestThroughputDemo() { .subscribe(dbItem -> {}, err -> {}, () -> { logger.info("Finished deleting resources."); - resourcesDeleted.set(true); + resources_deleted.set(true); }); // Do other things until async response arrives logger.info("Do other things until async resource delete completes..."); - while (!resourcesDeleted.get()) Profile.doOtherThings(); + while (!resources_deleted.get()) Profile.doOtherThings(); // Close client. This is always sync. 
logger.info("Closing client..."); diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java index b6345ff..d99fea0 100644 --- a/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java @@ -49,7 +49,7 @@ public static void requestThroughputDemo() { database = client.createDatabaseIfNotExists("ContosoInventoryDB").getDatabase(); logger.info("Got DB."); CosmosContainerProperties containerProperties = new CosmosContainerProperties("ContosoInventoryContainer", "/id"); - container = database.createContainerIfNotExists(containerProperties, 100000).getContainer(); + container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); logger.info("Got container."); // Resources are ready. // From b3ccb15c15b5c2a93d168a2ab0ffe775570e6569 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Wed, 18 Mar 2020 14:23:20 -0700 Subject: [PATCH 053/110] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index abbd187..147cb7a 100644 --- a/README.md +++ b/README.md @@ -71,10 +71,10 @@ where *sample.synchronicity.MainClass* can be *Build and execute from command line without an IDE:* From top-level directory of repo: ``` mvn clean package -mvn exec:java -Dexec.mainClass="com.azure.cosmos.examples.changefeed.sample" -DACCOUNT_HOST=your account hostname -DACCOUNT_KEY=your account master key +mvn exec:java -Dexec.mainClass="com.azure.cosmos.examples.sample.synchronicity.MainClass" -DACCOUNT_HOST=your account hostname -DACCOUNT_KEY=your account master key ``` -where *sample*, *your account hostname*, and *your account master key* are to be filled in as above. This will rebuild and run the selected sample. 
+where *sample.synchronicity.MainClass*, *your account hostname*, and *your account master key* are to be filled in as above. This will rebuild and run the selected sample. ## Key concepts From 9226c2132b74d0ee591ff56a541ded42d1174829 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 23 Mar 2020 21:45:13 -0700 Subject: [PATCH 054/110] Manually merged changes to Worked App Sample and request throughput sample --- .../async/SampleRequestThroughputAsync.java | 71 ++-- .../sync/SampleRequestThroughput.java | 65 +++- .../workedappexample/SampleGroceryStore.java | 338 ++++++++++++++++++ 3 files changed, 444 insertions(+), 30 deletions(-) create mode 100644 src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java index 395af50..5c6af17 100644 --- a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java @@ -10,8 +10,11 @@ import com.azure.cosmos.examples.common.Profile; import com.azure.cosmos.models.CosmosContainerProperties; import com.fasterxml.jackson.databind.JsonNode; +import com.google.common.util.concurrent.AtomicDouble; +import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import java.util.ArrayList; @@ -36,6 +39,7 @@ public static void main(String[] args) { private static AtomicBoolean resources_created = new AtomicBoolean(false); private static AtomicInteger number_docs_inserted = new AtomicInteger(0); private static AtomicBoolean resources_deleted = new AtomicBoolean(false); + private static AtomicDouble total_charge = new AtomicDouble(0.0); public static void 
requestThroughputDemo() { ConnectionPolicy my_connection_policy = ConnectionPolicy.getDefaultPolicy(); @@ -52,12 +56,12 @@ public static void requestThroughputDemo() { // Describe the logic of database and container creation using Reactor... Mono databaseContainerIfNotExist = client.createDatabaseIfNotExists("ContosoInventoryDB").flatMap(databaseResponse -> { database = databaseResponse.getDatabase(); - logger.info("Got DB."); + logger.info("\n\n\n\nCreated database ContosoInventoryDB.\n\n\n\n"); CosmosContainerProperties containerProperties = new CosmosContainerProperties("ContosoInventoryContainer", "/id"); - return database.createContainerIfNotExists(containerProperties, 400); + return database.createContainerIfNotExists(containerProperties, 100000); }).flatMap(containerResponse -> { container = containerResponse.getContainer(); - logger.info("Got container."); + logger.info("\n\n\n\nCreated container ContosoInventoryContainer.\n\n\n\n"); return Mono.empty(); }); @@ -81,29 +85,49 @@ public static void requestThroughputDemo() { // Insert many docs into container... logger.info("Inserting {} documents...", number_of_docs); - docs.forEach(doc -> { - try { - Thread.sleep(12); - } catch (Exception err) { - logger.error("Error throttling programmatically: ",err); - } - // ...by describing logic of item insertion using Reactor. Then subscribe() to execute. - container.createItem(doc) - // ^Publisher: upon subscription, createItem inserts a doc & - // publishes request response to the next operation... - .flatMap(itemResponse -> { - // ...Streaming operation: count each doc & check success... - if (itemResponse.getStatusCode() == 201) - number_docs_inserted.getAndIncrement(); - else - logger.warn("WARNING insert status code {} != 201", itemResponse.getStatusCode()); - return Mono.empty(); - }).subscribe(); // ...Subscribing to the publisher triggers stream execution. 
- }); + + Profile.tic(); + int last_docs_inserted=0; + double last_total_charge=0.0; + + Flux.fromIterable(docs).flatMap(doc -> container.createItem(doc)) + // ^Publisher: upon subscription, createItem inserts a doc & + // publishes request response to the next operation... + .flatMap(itemResponse -> { + // ...Streaming operation: count each doc & check success... + + if (itemResponse.getStatusCode() == 201) { + number_docs_inserted.getAndIncrement(); + total_charge.getAndAdd(itemResponse.getRequestCharge()); + } + else + logger.warn("WARNING insert status code {} != 201", itemResponse.getStatusCode()); + return Mono.empty(); + }).subscribe(); // ...Subscribing to the publisher triggers stream execution. // Do other things until async response arrives logger.info("Doing other things until async doc inserts complete..."); - while (number_docs_inserted.get() < number_of_docs) Profile.doOtherThings(); + //while (number_docs_inserted.get() < number_of_docs) Profile.doOtherThings(); + double toc_time=0.0; + int current_docs_inserted=0; + double current_total_charge=0.0, rps=0.0, rups=0.0; + while (number_docs_inserted.get() < number_of_docs) { + toc_time=Profile.toc_ms(); + current_docs_inserted=number_docs_inserted.get(); + current_total_charge=total_charge.get(); + if (toc_time >= 1000.0) { + Profile.tic(); + rps=1000.0*((double)(current_docs_inserted-last_docs_inserted))/toc_time; + rups=1000.0*(current_total_charge-last_total_charge)/toc_time; + logger.info(String.format("\n\n\n\n" + + "Async Throughput Profiler Result, Last 1000ms:" + "\n\n" + + "%8s %8s", StringUtils.center("Req/sec",8),StringUtils.center("RU/s",8)) + "\n" + + "----------------------------------" + "\n" + + String.format("%8.1f %8.1f",rps,rups) + "\n\n\n\n"); + last_docs_inserted=current_docs_inserted; + last_total_charge=current_total_charge; + } + } // Inserts are complete. Cleanup (asynchronously!) 
logger.info("Deleting resources."); @@ -123,5 +147,6 @@ public static void requestThroughputDemo() { logger.info("Closing client..."); client.close(); logger.info("Done with demo."); + } } diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java index d99fea0..8ec56ac 100644 --- a/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java @@ -7,15 +7,24 @@ import com.azure.cosmos.CosmosClientBuilder; import com.azure.cosmos.CosmosContainer; import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.ThrottlingRetryOptions; import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.Profile; import com.azure.cosmos.models.CosmosContainerProperties; import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.IndexingMode; +import com.azure.cosmos.models.IndexingPolicy; import com.fasterxml.jackson.databind.JsonNode; +import com.google.common.util.concurrent.AtomicDouble; +import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import java.time.Duration; import java.util.ArrayList; +import java.util.concurrent.atomic.AtomicInteger; public class SampleRequestThroughput { @@ -32,14 +41,24 @@ public static void main(String[] args) { private static CosmosClient client; private static CosmosDatabase database; private static CosmosContainer container; + private static AtomicInteger number_docs_inserted = new AtomicInteger(0); + private static AtomicDouble total_charge = new AtomicDouble(0.0); + private static int last_docs_inserted=0; + private static double last_total_charge=0.0; + private static double toc_time=0.0; + 
private static int current_docs_inserted=0; + private static double current_total_charge=0.0, rps=0.0, rups=0.0; public static void requestThroughputDemo() { ConnectionPolicy my_connection_policy = ConnectionPolicy.getDefaultPolicy(); + ThrottlingRetryOptions retry_options = new ThrottlingRetryOptions(); + //retry_options.setMaxRetryWaitTime(Duration.ZERO); + my_connection_policy.setThrottlingRetryOptions(retry_options); client = new CosmosClientBuilder() .setEndpoint(AccountSettings.HOST) .setKey(AccountSettings.MASTER_KEY) - .setConnectionPolicy(ConnectionPolicy.getDefaultPolicy()) + .setConnectionPolicy(my_connection_policy) .setConsistencyLevel(ConsistencyLevel.EVENTUAL) .buildClient(); @@ -47,10 +66,14 @@ public static void requestThroughputDemo() { // While the client waits for a response, this thread is blocked from // performing other tasks. database = client.createDatabaseIfNotExists("ContosoInventoryDB").getDatabase(); - logger.info("Got DB."); + logger.info("\n\n\n\nCreated database ContosoInventoryDB.\n\n\n\n"); + //IndexingPolicy indexingPolicy = new IndexingPolicy(); + //indexingPolicy.setIndexingMode(IndexingMode.NONE); + //indexingPolicy.setAutomatic(false); CosmosContainerProperties containerProperties = new CosmosContainerProperties("ContosoInventoryContainer", "/id"); - container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); - logger.info("Got container."); + //containerProperties.setIndexingPolicy(indexingPolicy); + container = database.createContainerIfNotExists(containerProperties, 100000).getContainer(); + logger.info("\n\n\n\nCreated container ContosoInventoryContainer.\n\n\n\n"); // Resources are ready. 
// // Create many docs to insert into the container @@ -59,15 +82,42 @@ public static void requestThroughputDemo() { ArrayList docs = Profile.generateDocs(number_of_docs); logger.info("Inserting {} documents...", number_of_docs); + Profile.tic(); + + //Profiler code - it's good for this part to be async + Flux.interval(Duration.ofMillis(10)).map(tick -> { + //logger.info("In profiler."); + toc_time=Profile.toc_ms(); + current_docs_inserted=number_docs_inserted.get(); + current_total_charge=total_charge.get(); + if (toc_time >= 1000.0) { + Profile.tic(); + rps=1000.0*((double)(current_docs_inserted-last_docs_inserted))/toc_time; + rups=1000.0*(current_total_charge-last_total_charge)/toc_time; + logger.info(String.format("\n\n\n\n" + + "Sync Throughput Profiler Result, Last 1000ms:" + "\n\n" + + "%8s %8s", StringUtils.center("Req/sec",8),StringUtils.center("RU/s",8)) + "\n" + + "----------------------------------" + "\n" + + String.format("%8.1f %8.1f",rps,rups) + "\n\n\n\n"); + last_docs_inserted=current_docs_inserted; + last_total_charge=current_total_charge; + } + return tick; + }).subscribe(); + // Insert many docs synchronously. // The client blocks waiting for a response to each insert request, // which limits throughput. 
// While the client is waiting for a response, the thread is blocked from other tasks - docs.forEach(doc -> { + for(JsonNode doc : docs) { CosmosItemResponse itemResponse = container.createItem(doc); - if (itemResponse.getStatusCode() != 201) + if (itemResponse.getStatusCode() == 201) { + number_docs_inserted.getAndIncrement(); + total_charge.getAndAdd(itemResponse.getRequestCharge()); + } + else logger.warn("WARNING insert status code {} != 201", itemResponse.getStatusCode()); - }); + } // Clean up logger.info("Deleting resources."); @@ -79,5 +129,6 @@ public static void requestThroughputDemo() { client.close(); logger.info("Done with demo."); + } } diff --git a/src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java b/src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java new file mode 100644 index 0000000..701c0fb --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java @@ -0,0 +1,338 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+package com.azure.cosmos.examples.workedappexample; + +import com.azure.cosmos.ChangeFeedProcessor; +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosPagedFlux; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.Family; +import com.azure.cosmos.implementation.Utils; +import com.azure.cosmos.models.CosmosAsyncContainerResponse; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; +import com.azure.cosmos.models.FeedOptions; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.scheduler.Schedulers; + +import java.text.DateFormat; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.time.Duration; +import java.util.Date; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +/** + * End-to-end application example code using Change Feed. + * + * This sample application inserts grocery store inventory data into an Azure Cosmos DB container; + * meanwhile, Change Feed runs in the background building a materialized view + * based on each document update. + * + * The materialized view facilitates efficient queries over item type. 
+ * + */ +public class SampleGroceryStore { + + public static int WAIT_FOR_WORK = 60000; + public static final String DATABASE_NAME = "db_" + RandomStringUtils.randomAlphabetic(7); + public static final String COLLECTION_NAME = "coll_" + RandomStringUtils.randomAlphabetic(7); + private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper(); + protected static Logger logger = LoggerFactory.getLogger(SampleGroceryStore.class.getSimpleName()); + + + private static ChangeFeedProcessor changeFeedProcessorInstance; + private static boolean isWorkCompleted = false; + + private static CosmosAsyncContainer typeContainer; + + public static void main (String[]args) { + logger.info("BEGIN Sample"); + + try { + + System.out.println("Press enter to create the grocery store inventory system..."); + + System.out.println("-->CREATE DocumentClient"); + CosmosAsyncClient client = getCosmosClient(); + + System.out.println("-->CREATE Contoso Grocery Store database: " + DATABASE_NAME); + CosmosAsyncDatabase cosmosDatabase = createNewDatabase(client, DATABASE_NAME); + + System.out.println("-->CREATE container for store inventory: " + COLLECTION_NAME); + CosmosAsyncContainer feedContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME, "/id"); + + System.out.println("-->CREATE container for lease: " + COLLECTION_NAME + "-leases"); + CosmosAsyncContainer leaseContainer = createNewLeaseCollection(client, DATABASE_NAME, COLLECTION_NAME + "-leases"); + + System.out.println("-->CREATE container for materialized view partitioned by 'type': " + COLLECTION_NAME + "-leases"); + typeContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME + "-pktype", "/type"); + + System.out.println("Press enter to add items to the grocery store inventory system..."); + + changeFeedProcessorInstance = getChangeFeedProcessor("SampleHost_1", feedContainer, leaseContainer); + changeFeedProcessorInstance.start() + .subscribeOn(Schedulers.elastic()) + .doOnSuccess(aVoid -> { 
+ //Insert 10 documents into the feed container + //createNewDocumentsJSON demonstrates how to insert a JSON object into a Cosmos DB container as an item + createNewDocumentsJSON(feedContainer, 10, Duration.ofSeconds(3)); + isWorkCompleted = true; + }) + .subscribe(); + + long remainingWork = WAIT_FOR_WORK; + while (!isWorkCompleted && remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + + if (isWorkCompleted) { + if (changeFeedProcessorInstance != null) { + changeFeedProcessorInstance.stop().subscribe(); + } + } else { + throw new RuntimeException("The change feed processor initialization and automatic create document feeding process did not complete in the expected time"); + } + + System.out.println("Press enter to query the materialized view..."); + + queryItems("SELECT * FROM c WHERE c.type IN ('milk','pens')", typeContainer); + + System.out.println("Press enter to clean up & exit the sample code..."); + + System.out.println("-->DELETE sample's database: " + DATABASE_NAME); + deleteDatabase(cosmosDatabase); + + Thread.sleep(500); + + } catch (Exception e) { + e.printStackTrace(); + } + + System.out.println("END Sample"); + } + + public static ChangeFeedProcessor getChangeFeedProcessor(String hostName, CosmosAsyncContainer feedContainer, CosmosAsyncContainer leaseContainer) { + return ChangeFeedProcessor.changeFeedProcessorBuilder() + .setHostName(hostName) + .setFeedContainer(feedContainer) + .setLeaseContainer(leaseContainer) + .setHandleChanges((List docs) -> { + for (JsonNode document : docs) { + //Duplicate each document update from the feed container into the materialized view container + updateInventoryTypeMaterializedView(document); + } + + }) + .build(); + } + + private static void updateInventoryTypeMaterializedView(JsonNode document) { + typeContainer.createItem(document).subscribe(); + } + + public static CosmosAsyncClient getCosmosClient() { + + return new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + 
.setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(ConnectionPolicy.getDefaultPolicy()) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildAsyncClient(); + } + + public static CosmosAsyncDatabase createNewDatabase(CosmosAsyncClient client, String databaseName) { + return client.createDatabaseIfNotExists(databaseName).block().getDatabase(); + } + + public static void deleteDatabase(CosmosAsyncDatabase cosmosDatabase) { + cosmosDatabase.delete().block(); + } + + public static CosmosAsyncContainer createNewCollection(CosmosAsyncClient client, String databaseName, String collectionName, String partitionKey) { + CosmosAsyncDatabase databaseLink = client.getDatabase(databaseName); + CosmosAsyncContainer collectionLink = databaseLink.getContainer(collectionName); + CosmosAsyncContainerResponse containerResponse = null; + + try { + containerResponse = collectionLink.read().block(); + + if (containerResponse != null) { + throw new IllegalArgumentException(String.format("Collection %s already exists in database %s.", collectionName, databaseName)); + } + } catch (RuntimeException ex) { + if (ex instanceof CosmosClientException) { + CosmosClientException cosmosClientException = (CosmosClientException) ex; + + if (cosmosClientException.getStatusCode() != 404) { + throw ex; + } + } else { + throw ex; + } + } + + CosmosContainerProperties containerSettings = new CosmosContainerProperties(collectionName, partitionKey); + CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); + containerResponse = databaseLink.createContainer(containerSettings, 10000, requestOptions).block(); + + if (containerResponse == null) { + throw new RuntimeException(String.format("Failed to create collection %s in database %s.", collectionName, databaseName)); + } + + return containerResponse.getContainer(); + } + + public static CosmosAsyncContainer createNewLeaseCollection(CosmosAsyncClient client, String databaseName, String leaseCollectionName) { + 
CosmosAsyncDatabase databaseLink = client.getDatabase(databaseName); + CosmosAsyncContainer leaseCollectionLink = databaseLink.getContainer(leaseCollectionName); + CosmosAsyncContainerResponse leaseContainerResponse = null; + + try { + leaseContainerResponse = leaseCollectionLink.read().block(); + + if (leaseContainerResponse != null) { + leaseCollectionLink.delete().block(); + + try { + Thread.sleep(1000); + } catch (InterruptedException ex) { + ex.printStackTrace(); + } + } + } catch (RuntimeException ex) { + if (ex instanceof CosmosClientException) { + CosmosClientException cosmosClientException = (CosmosClientException) ex; + + if (cosmosClientException.getStatusCode() != 404) { + throw ex; + } + } else { + throw ex; + } + } + + CosmosContainerProperties containerSettings = new CosmosContainerProperties(leaseCollectionName, "/id"); + CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); + + leaseContainerResponse = databaseLink.createContainer(containerSettings, 400,requestOptions).block(); + + if (leaseContainerResponse == null) { + throw new RuntimeException(String.format("Failed to create collection %s in database %s.", leaseCollectionName, databaseName)); + } + + return leaseContainerResponse.getContainer(); + } + + public static void createNewDocumentsJSON(CosmosAsyncContainer containerClient, int count, Duration delay) { + System.out.println("Creating documents\n"); + String suffix = RandomStringUtils.randomAlphabetic(10); + for (int i = 0; i <= count; i++) { + + String jsonString = "{\"id\" : \"" + String.format("0%d-%s", i, suffix) + "\"" + + "," + + "\"brand\" : \"" + ((char)(65+i)) + "\"" + + "," + + "\"type\" : \"" + ((char)(69+i)) + "\"" + + "," + + "\"expiryDate\" : \"" + "2020-03-" + StringUtils.leftPad(String.valueOf(5+i), 2, "0") + "\"" + + "}"; + + ObjectMapper mapper = new ObjectMapper(); + JsonNode document = null; + + try { + document = mapper.readTree(jsonString); + } catch (Exception e) { + 
e.printStackTrace(); + } + + containerClient.createItem(document).subscribe(doc -> { + System.out.println(".\n"); + }); + + long remainingWork = delay.toMillis(); + try { + while (remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException iex) { + // exception caught + break; + } + } + } + + public static void queryItems(String query, CosmosAsyncContainer container) { + + FeedOptions queryOptions = new FeedOptions(); + queryOptions.setMaxItemCount(10); + // Set populate query metrics to get metrics around query executions + queryOptions.setPopulateQueryMetrics(true); + + CosmosPagedFlux pagedFluxResponse = container.queryItems( + query, queryOptions, JsonNode.class); + + final CountDownLatch completionLatch = new CountDownLatch(1); + + pagedFluxResponse.byPage().subscribe( + fluxResponse -> { + logger.info("Got a page of query result with " + + fluxResponse.getResults().size() + " items(s)" + + " and request charge of " + fluxResponse.getRequestCharge()); + + /* + fluxResponse.getResults() + + logger.info("Item Ids " + fluxResponse + .getResults() + .stream() + .map(JsonNode::get("id")) + .collect(Collectors.toList())); + + */ + }, + err -> { + if (err instanceof CosmosClientException) { + //Client-specific errors + CosmosClientException cerr = (CosmosClientException) err; + cerr.printStackTrace(); + logger.error(String.format("Read Item failed with %s\n", cerr)); + } else { + //General errors + err.printStackTrace(); + } + + completionLatch.countDown(); + }, + () -> { + completionLatch.countDown(); + } + ); + + try { + completionLatch.await(); + } catch (InterruptedException err) { + throw new AssertionError("Unexpected Interruption", err); + } + } + +} From 43f8bc81e518049d57abffe79a0dd44ca84c5ef5 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 24 Mar 2020 14:02:54 -0700 Subject: [PATCH 055/110] Sanified RUs and number of docs to 400RU/s and 50K docs, respectively. 
--- .../requestthroughput/async/SampleRequestThroughputAsync.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java index 5c6af17..2600bf6 100644 --- a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java @@ -58,7 +58,7 @@ public static void requestThroughputDemo() { database = databaseResponse.getDatabase(); logger.info("\n\n\n\nCreated database ContosoInventoryDB.\n\n\n\n"); CosmosContainerProperties containerProperties = new CosmosContainerProperties("ContosoInventoryContainer", "/id"); - return database.createContainerIfNotExists(containerProperties, 100000); + return database.createContainerIfNotExists(containerProperties, 400); }).flatMap(containerResponse -> { container = containerResponse.getContainer(); logger.info("\n\n\n\nCreated container ContosoInventoryContainer.\n\n\n\n"); @@ -79,7 +79,7 @@ public static void requestThroughputDemo() { while (!resources_created.get()) Profile.doOtherThings(); // Container is created. Generate many docs to insert. - int number_of_docs = 4000000; + int number_of_docs = 50000; logger.info("Generating {} documents...", number_of_docs); ArrayList docs = Profile.generateDocs(number_of_docs); From f5785cd518e8ce8fe52b200b97e713e72d2fd1a6 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 24 Mar 2020 14:07:02 -0700 Subject: [PATCH 056/110] Sanified RUs and number of docs to 400RU/s and 50K docs, respectively. Added disclaimer. 
--- .../sync/SampleRequestThroughput.java | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java index 8ec56ac..905755d 100644 --- a/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java @@ -26,6 +26,20 @@ import java.util.ArrayList; import java.util.concurrent.atomic.AtomicInteger; +/* + * Sync Request Throughput Sample + * + * Please note that perf testing incurs costs for provisioning container throughput and storage. + * + * This throughput profiling sample issues high-throughput document insert requests to an Azure Cosmos DB container. + * Run this code in a geographically colocated VM for best performance. + * + * Example configuration + * -Provision 100000 RU/s container throughput + * -Generate 4M documents + * -Result: ~60K RU/s actual throughput + */ + public class SampleRequestThroughput { protected static Logger logger = LoggerFactory.getLogger(SampleRequestThroughput.class.getSimpleName()); @@ -72,12 +86,12 @@ public static void requestThroughputDemo() { //indexingPolicy.setAutomatic(false); CosmosContainerProperties containerProperties = new CosmosContainerProperties("ContosoInventoryContainer", "/id"); //containerProperties.setIndexingPolicy(indexingPolicy); - container = database.createContainerIfNotExists(containerProperties, 100000).getContainer(); + container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); logger.info("\n\n\n\nCreated container ContosoInventoryContainer.\n\n\n\n"); // Resources are ready. 
// // Create many docs to insert into the container - int number_of_docs = 4000000; + int number_of_docs = 50000; logger.info("Generating {} documents...", number_of_docs); ArrayList docs = Profile.generateDocs(number_of_docs); logger.info("Inserting {} documents...", number_of_docs); From 5357885c6a03724ee777aa962a8c83fa4f412944 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 24 Mar 2020 14:08:00 -0700 Subject: [PATCH 057/110] Added disclaimer --- .../async/SampleRequestThroughputAsync.java | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java index 2600bf6..8b637ad 100644 --- a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java @@ -21,6 +21,21 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; + +/* + * Async Request Throughput Sample + * + * Please note that perf testing incurs costs for provisioning container throughput and storage. + * + * This throughput profiling sample issues high-throughput document insert requests to an Azure Cosmos DB container. + * Run this code in a geographically colocated VM for best performance. 
+ * + * Example configuration + * -Provision 100000 RU/s container throughput + * -Generate 4M documents + * -Result: ~60K RU/s actual throughput + */ + public class SampleRequestThroughputAsync { protected static Logger logger = LoggerFactory.getLogger(SampleRequestThroughputAsync.class.getSimpleName()); From 863bd64c59ab9be240113033b180815f91ce8868 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 24 Mar 2020 14:08:47 -0700 Subject: [PATCH 058/110] Fixed disclaimer --- .../requestthroughput/sync/SampleRequestThroughput.java | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java index 905755d..14b23c1 100644 --- a/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java @@ -34,10 +34,6 @@ * This throughput profiling sample issues high-throughput document insert requests to an Azure Cosmos DB container. * Run this code in a geographically colocated VM for best performance. 
* - * Example configuration - * -Provision 100000 RU/s container throughput - * -Generate 4M documents - * -Result: ~60K RU/s actual throughput */ public class SampleRequestThroughput { From beb202d9fb749822c6700cc2e7abf87b05a27dfd Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Wed, 25 Mar 2020 10:14:29 -0700 Subject: [PATCH 059/110] Removed RxJava vs Reactor --- reactor-pattern-guide.md | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index c88a62c..aa2016d 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -85,20 +85,6 @@ public void calling_function() { This approach of defining Reactive Streams and then subscribing to them later can be useful - **just remember that the logic of your Reactive Stream will not be executed until you ```subscribe()``` to it.** -TODO: -* Introduce ```Mono``` and ```Flux``` -* Reactor Factory Methods -* Operations to transform data - ```flatMap()```, ```reduce()```, nested imperative code -* More about ```subscribe()``` and ```onNext()```/```onComplete```/```onError``` and ```block()``` -* ```subscribeOn()```, ```publishOn```, schedulers -* ```onSuccess()``` - -## Reactor vs RxJava - -* Compare all of the above between Reactor and RxJava -* rxJava observeOn <-> reactive stream publishOn -* rxJava subscribeOn <-> reactive stream subscribeOn - ## For More Information * If you would like to learn more about Project Reactor and Reactive Streams, or get started writing code using Reactor, you can visit [the Project Reactor website.](https://projectreactor.io/) From 79d6eb35bb91aa866daeb25130480f7848e0eb0c Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Wed, 25 Mar 2020 10:15:42 -0700 Subject: [PATCH 060/110] Removed RxJava vs Reactor content --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index aa2016d..160aa57 100644 --- 
a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -27,7 +27,7 @@ How this differs from imperative programming, is that the coder is describing th [Project Reactor](https://projectreactor.io/) or just *Reactor* is the Reactive Programming framework used in Java SDK v3.x.x and above. -The purpose of the rest of this document is to help you start using Reactor with as little trouble as possible. This includes suggestions for upgrading your code from RxJava to Reactor and also Reactor design pattern guidelines. +The purpose of the rest of this document is to help you start using Reactor with as little trouble as possible. ## Reactor Design Patterns From 5ecc63a07a0ba084c143331985bab3fbd58f4880 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 00:22:00 -0700 Subject: [PATCH 061/110] Update to Reactor Pattern guide --- reactor-pattern-guide.md | 107 ++++++++++++++++++++++++++------------- 1 file changed, 73 insertions(+), 34 deletions(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 160aa57..f79d6aa 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -31,59 +31,98 @@ The purpose of the rest of this document is to help you start using Reactor with ## Reactor Design Patterns -To write a program using Reactor, you will need to describe one or more Reactive Streams. In typical uses of Reactor, you describe a stream -by (1) creating a *Publisher* (which originates data asynchronously) and a *Subscriber* (which consumes data and operates on it asynchronously), and (2) -describing a pipeline from Publisher to Subscriber, in which the data from Publisher is transformed at each pipeline stage before eventually -ending in Subscriber. In this section we will discuss this process in more detail and demonstrate how Reactor lets you define the transforming operation at each -pipeline stage. +To write a program using Reactor, you will need to describe one or more Reactive Streams. 
In typical uses of Reactor, you describe a stream by (1) creating a *Publisher* (which originates data asynchronously) and a *Subscriber* (which consumes data and operates on it asynchronously), and (2) describing a pipeline from Publisher to Subscriber, in which the data from Publisher is transformed at each pipeline stage before eventually ending in Subscriber. -### 1. ***Describing a Reactive Stream (A Publisher-Subscriber Pipeline)*** +Reactor follows a "hybrid push-pull model": your code is triggered on an event-driven basis by the Publisher, but ***only*** once you signal the Publisher via a Subscription. -Here is a simple Reactive Stream: +Consider a "typical" program you might be used to writing. You are writing a piece of code, but it takes a dependency on other code with unpredictable response time. For example, maybe you wrote a function to perform a calculation, and one input comes from calling a function that requests data over TCP/IP. You might typically deal with this by implementing a control flow in which you first call the dependency code, wait for it to return output, and then provide that output to the other piece of code as input. So your code is “pulling” output from its dependency on an on-demand basis. This can be inefficient if there is latency in the dependency (as is the case for a TCP/IP request); the next piece of code has to wait. + +In a "push" model the dependency signals the next piece of code to consume output when it becomes available; otherwise, your code is dormant, freeing up CPU cycles. This is a more event-driven concept. But in order for the dependency to signal the next piece of code, it has to know that it is a dependency – in a Reactive program we have to define the dependency relations in advance. 
+ +```java +Assembly phase (define dependency relations as a pipeline) +Subscribe phase (execute pipeline on incoming events) + +Flux reminderPipeline = +ReminderAsyncService.getRemindersPublisher() + .flatMap(reminder -> “Don’t forget: ” + reminder) + .flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn); // Nothing executed yet + +reminderPipeline.subscribe(System.out::println); // Async – returns immediately +while (true) doOtherThings(); // We’re freed up to do other tasks 😊 +``` + +A Flux in Reactor represents … and is the general-purpose class for doing so. In the assembly phase, you are describing program logic as an async operation pipeline, but not actually executing it yet. So ReminderAsyncService.getRemindersPublisher() returns a Flux representing just the reminders publisher. ReminderAsyncService.getRemindersPublisher().flatMap(reminder -> “Don’t forget: ” + reminder) returns an augmented Flux that represents the reminders publisher followed by the “Don’t forget: ” + reminder operation that consumes the publisher’s output and prepends “Don’t forget: ”. And ReminderAsyncService.getRemindersPublisher().flatMap(reminder -> “Don’t forget: ” + reminder).flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn) returns an even further augmented Flux that represents the reminders publisher followed by the “Don’t forget: ” + reminder operation, followed subsequently by an operation which prepends a timestamp to the output of the previous step. In each case, the output is Flux, where T is the output type of the transformation applied at that stage. So hypothetically if you defined an async operation pipeline which ate int’s and spat out Strings, the output of the assembly phase would be a Flux representing the pipeline. +In the subscription phase you execute what you defined in the assembly phase. Here is how that works. 
You call + + ```java + reminderPipeline.subscribe(System.out::println); //Async – returns immediately + ``` + +and subscribe() will generate a Subscription instance requesting all events that RemindersPublisher will ever produce. Reactor framework propagates this Subscription instance backwards up the pipeline to the RemindersPublisher instance. The RemindersPublisher instance reads this Subscription and responds by pushing an event into the pipeline every time there is a new reminder. The publisher will continue to push an event every time a reminder becomes available, until it has pushed as many events as were requested in the Subscription (which is infinity in this case, so the publisher will just keep going.) +When I say that the publisher pushes events into the pipeline, I mean that the publisher issues an onNext signal to the next pipeline stage (“Don’t forget:” + reminder) paired with a String argument containing the reminder. flatMap() responds to an onNext signal by taking the String argument and applying the specified transformation to it (in this case, prepending the words “Don’t forget:”). This signal propagates down the pipeline: the “Don’t forget:” + reminder stage issues an onNext signal to the next stage with its output as the argument; then the LocalDateTime.now().toString() + “:” + strIn stage issues an onNext signal to the next stage with its output as the argument. Now what happens after that is special – we reached the last pipeline stage, so what happens to final-stage onNext signal and its associated String argument? The answer is that the Subscription created by the subscribe() call implements a method for handling onNext signals, which you can customize for your application. This method is expected to handle the pipeline output with some finality, i.e. by printing it to the terminal, displaying it in a GUI, or doing something else before discarding the data entirely. 
because one piece of code (Subscription) is calling for output from other code that it depends on (Publisher).
+* subscribe() is Reactor’s built-in subscription generator, it -Here, the Publisher-Subscriber pipeline is simple - the next pipeline stage after the Publisher is the Subscriber, ```.subscribe(System.out::println)```, which -will receive the two Strings as they arrive from upstream and process them by applying ```System.out::println``` to each one, again asynchronously. -The output would be +Flux supports publishers with 0, 1, or N events, where N can be finite or infinite. The assembly stage for a publisher with N=3 events is shown below ```java -Hello -Cosmos DB +Flux reminderPipeline = + Flux.just(“Wash the dishes”,“Mow the lawn”,”Sleep”) // Publisher, 3 events + .flatMap(reminder -> “Don’t forget: ” + reminder) + .flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn); // Nothing executed yet ``` -This is a simple Publisher-Subscriber pipeline with no operations to transform the data. -The call to ```subscribe()``` is what ultimately triggers data to flow through the Reactive Stream -and carry out the logic of your program. Simply calling +And upon subscription, ```java -Flux.just("Hello","Cosmos DB"); +reminderPipeline.subscribe(System.out::println); ``` -without calling ```subscribe()``` will **not** execute the logic of your program; this line will simply return a ```Flux``` which represents -the pipeline of operations starting from the Publisher (which in this case, consists only of the Publisher). This ```Flux``` can be stored in a -variable and used like any other variable. For example you can return its value and use that value elsewhere in the program, i.e. by subscribing to it in another function: +will output the three Strings shown (corresponding to three publisher events pushed into the pipeline) and then stop. Suppose now we want to add two special behaviors to our program: (1) After all N Strings have been printed, print “End of reminders.” so the user knows we are finished. (2) Print any exceptions which occur during execution. 
A modification to the subscribe() call handles all of this: ```java -private Flux some_function() { - return Flux.just("Hello","Cosmos DB"); -} - -public void calling_function() { - Flux str_flux = some_function(); //Returns a representation of a Reactive Stream - str_flux.subscribe(System.out::println); //Produces the same output as the original example, by subscribing to the Reactive Stream -} +reminderPipeline.subscribe(strIn -> { + System.out.println(strIn); +}, +err -> { + err.printStackTrace(); +}, +() -> { + System.out.println(“End of reminders.”); +}); ``` -This approach of defining Reactive Streams and then subscribing to them later can be useful - **just remember that the logic of your Reactive Stream will -not be executed until you ```subscribe()``` to it.** +Let’s break this down. Remember we said that the argument to subscribe() determines how it handles incoming signals such as onNext? Reactor actually has three important signals which propagate state information along the pipeline: onNext, onComplete, and onError. 
Reactive Programming is a useful technique (though not the only technique) for event-driven asynchronous programming.
A simple imperative program in pseudocode is @@ -14,22 +14,23 @@ Reactive Programming is a declarative programming paradigm in which program oper Then do operation3 on variable z And then print the result -Reactive Programming is a **declarative** paradigm - specifically a **dataflow** paradigm - in which the programmer must describe a directed graph of operations which represents the logic of the program. A simple declarative dataflow representation of the above program in pseudocode is: +Specifically, Reactive Programming is a **declarative dataflow** paradigm - the programmer must describe a directed acyclic graph (DAG) of operations which represents the logic of the program and the flow of data. A simple declarative dataflow representation of the above program in pseudocode is: asynchronous data source => operation1 => operation2 => operation3 => print -How this differs from imperative programming, is that the coder is describing the high-level process of execution but letting the language implementation decide when and how to implement these operations. This is exemplified by the concept of *back-pressure* which is baked into some implementations of Reactive Programming. Back-pressure essentially rate-limits dataflow in a Reactive Stream based on the slowest pipelined operation. An imperative implementation of back-pressure would require the programmer to describe a complicated flow-control process - whereas in a declarative dataflow language with back-pressure, the programmer specifies the directed graph of pipelined operations while the language handles scheduling of operations at the implementation level to ensure that no operation receives data faster than it can process. +How this differs from imperative programming, is that the coder is describing the high-level process of execution but letting the language implementation decide when and how to implement these operations. 
It is the basis for Azure's new async SDKs.
[RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](http://reactivex.io/) for JVM) was the basis of past Azure Java SDKs, but will not be going forward.
A selection of useful Reactor operations + ## For More Information * If you would like to learn more about Project Reactor and Reactive Streams, or get started writing code using Reactor, you can visit [the Project Reactor website.](https://projectreactor.io/) From 5d561d94099c32c0b726a319f2cd8752ebdc786c Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 02:42:27 -0700 Subject: [PATCH 063/110] Progress on reactor pattern guide --- reactor-pattern-guide.md | 76 +++++++++++++++++++++++++++------------- 1 file changed, 52 insertions(+), 24 deletions(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 2441756..8f47d76 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -1,5 +1,7 @@ # Reactor Pattern Guide +The purpose of this guide is to help you get started using Reactor-based Java SDKs by understanding Reactor-based design patterns. It is recommended to read the [Project Reactor](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) documentation if you want to learn more. + ## Background ### 1. Reactive Programming and the Reactive Streams Standard @@ -30,52 +32,78 @@ Reactive Streams frameworks implement the Reactive Streams Standard for specific ## Reactor design patterns -### 1. Assembly and execution +### 1. Assemble and Subscribe phases -To write a program using Reactor, you will need to describe one or more Reactive Streams. In typical uses of Reactor, you describe a stream by (1) creating a *Publisher* (which originates data asynchronously) and a *Subscriber* (which consumes data and operates on it asynchronously), and (2) describing a pipeline from Publisher to Subscriber, in which the data from Publisher is transformed at each pipeline stage before eventually ending in Subscriber. +To write a program using Reactor, you will need to describe one or more async operation pipelines for processing Reactive Streams. 
In typical uses of Reactor, you describe a pipeline by -Reactor follows a "hybrid push-pull model": your code is triggered on an event-driven basis by the Publisher, but ***only*** once you signal the Publisher via a Subscription. +1. Creating a ```Publisher``` (which pushes events and data into the pipeline asynchronously) and a ```Subscriber``` (which consumes events and data from the pipeline and operates on them asynchronously) -Consider a "typical" program you might be used to writing. You are writing a piece of code, but it takes a dependency on other code with unpredictable response time. For example, maybe you wrote a function to perform a calculation, and one input comes from calling a function that requests data over TCP/IP. You might typically deal with this by implementing a control flow in which you first call the dependency code, wait for it to return output, and then provide that output to the other piece of code as input. So your code is “pulling” output from its dependency on an on-demand basis. This can be inefficient if there is latency in the dependency (as is the case for a TCP/IP request); the next piece of code has to wait. +2. Describing each stage in the pipeline programmatically, in terms of how it processes data from the previous stage. -In a "push" model the dependency signals the next piece of code to consume output when it becomes available; otherwise, your code is dormant, freeing up CPU cycles. This is a more event-driven concept. But in order for the dependency to signal the next piece of code, it has to know that it is a dependency – in a Reactive program we have to define the dependency relations in advance. +Reactor follows a "hybrid push-pull model": the ```Publisher``` pushes events and data into the pipeline as they are available, but ***only*** once you request events and data from the ```Publisher``` via a ```Subscription```. 
-```java -Assembly phase (define dependency relations as a pipeline) -Subscribe phase (execute pipeline on incoming events) +To put this in context, consider a "normal" non-Reactor program you might write that takes takes a dependency on some other code with unpredictable response time. For example, maybe you write a function to perform a calculation, and one input comes from calling a function that requests data over TCP/IP. You might deal with this by implementing a control flow which first calls the dependency code, waits for it to return output, and then provides that output to your code as input. So your code is “pulling” output from its dependency on an on-demand basis. This can be inefficient if there is latency in the dependency (as is the case for a TCP/IP request example); your code has to loop waiting for the dependency. + +In a "push" model the dependency signals your code to consume the TCP/IP request response on an "on-availability" basis; the rest of the time, your code lies dormant, freeing up CPU cycles. This is an event-driven and async approach. But in order for the dependency to signal your code, ***the dependency has to know that your code depends on it*** – and that is the purpose of defining async operation pipelines in Reactor; each pipeline stage is really a piece of async code servicing events and data from the previous stage on an on-availability basis. By defining the pipeline, you tell each stage where to forward events and data to. +Now I will illustrate this with Reactor code examples. 
+ +**Assembly phase (define dependency relations as a pipeline)** +```java Flux reminderPipeline = ReminderAsyncService.getRemindersPublisher() .flatMap(reminder -> “Don’t forget: ” + reminder) .flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn); // Nothing executed yet +``` + +**Subscribe phase (execute pipeline on incoming events)** +```java +reminderPipeline.subscribe(System.out::println); // Async – returns immediately, pipeline executes in the background -reminderPipeline.subscribe(System.out::println); // Async – returns immediately while (true) doOtherThings(); // We’re freed up to do other tasks 😊 ``` -A Flux in Reactor represents … and is the general-purpose class for doing so. In the assembly phase, you are describing program logic as an async operation pipeline, but not actually executing it yet. So ReminderAsyncService.getRemindersPublisher() returns a Flux representing just the reminders publisher. ReminderAsyncService.getRemindersPublisher().flatMap(reminder -> “Don’t forget: ” + reminder) returns an augmented Flux that represents the reminders publisher followed by the “Don’t forget: ” + reminder operation that consumes the publisher’s output and prepends “Don’t forget: ”. And ReminderAsyncService.getRemindersPublisher().flatMap(reminder -> “Don’t forget: ” + reminder).flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn) returns an even further augmented Flux that represents the reminders publisher followed by the “Don’t forget: ” + reminder operation, followed subsequently by an operation which prepends a timestamp to the output of the previous step. In each case, the output is Flux, where T is the output type of the transformation applied at that stage. So hypothetically if you defined an async operation pipeline which ate int’s and spat out Strings, the output of the assembly phase would be a Flux representing the pipeline. -In the subscription phase you execute what you defined in the assembly phase. Here is how that works. 
You call +The ```Flux``` class internally represents an async operation pipeline as a DAG and provides instance methods for operating on the pipeline. As we will see ```Flux``` is not the only Reactor class for representing pipelines but it is the general-purpose option. + +In the **Assembly phase** shown above, you describe program logic as an async operation pipeline (a ```Flux```), but don't actually execute it just yet. Let's break down how the async operation pipeline is built in the **Assembly phase** snippet above: + +* **Stage 1**: ```ReminderAsyncService.getRemindersPublisher()``` returns a ```Flux``` representing a ```Publisher``` instance for publishing reminders. + +* **Stage 2**: ```.flatMap(reminder -> “Don’t forget: ” + reminder)``` modifies the ```Flux``` from **Stage 1** and returns an augmented ```Flux``` that represents a two-stage pipeline consisting of the reminders ```Publisher``` followed by the ```reminder -> “Don’t forget: ” + reminder``` operation which prepends "Don't forget: " to the ```reminder``` string (```reminder``` is a variable that can have any name and represents the previous stage output.) + +* **Stage 3**: ```.flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn)``` modifies the ```Flux``` from **Stage 2** and returns a further-augmented ```Flux``` that represents a three-stage pipeline consisting of the reminders ```Publisher```, the **Stage 2** operation, and finally the ```strIn -> LocalDateTime.now().toString() + “: ”+ strIn``` operation, which timestamps the **Stage 2** output string. So hypothetically if you defined an async operation pipeline which ate int’s and spat out Strings, the output of the assembly phase would be a Flux representing the pipeline. + +In the **Subscribe phase** you execute the pipeline that you defined in the Assembly phase. Here is how that works. 
You call - ```java - reminderPipeline.subscribe(System.out::println); //Async – returns immediately - ``` +```java +reminderPipeline.subscribe(System.out::println); //Async – returns immediately +``` -and subscribe() will generate a Subscription instance requesting all events that RemindersPublisher will ever produce. Reactor framework propagates this Subscription instance backwards up the pipeline to the RemindersPublisher instance. The RemindersPublisher instance reads this Subscription and responds by pushing an event into the pipeline every time there is a new reminder. The publisher will continue to push an event every time a reminder becomes available, until it has pushed as many events as were requested in the Subscription (which is infinity in this case, so the publisher will just keep going.) -When I say that the publisher pushes events into the pipeline, I mean that the publisher issues an onNext signal to the next pipeline stage (“Don’t forget:” + reminder) paired with a String argument containing the reminder. flatMap() responds to an onNext signal by taking the String argument and applying the specified transformation to it (in this case, prepending the words “Don’t forget:”). This signal propagates down the pipeline: the “Don’t forget:” + reminder stage issues an onNext signal to the next stage with its output as the argument; then the LocalDateTime.now().toString() + “:” + strIn stage issues an onNext signal to the next stage with its output as the argument. Now what happens after that is special – we reached the last pipeline stage, so what happens to final-stage onNext signal and its associated String argument? The answer is that the Subscription created by the subscribe() call implements a method for handling onNext signals, which you can customize for your application. This method is expected to handle the pipeline output with some finality, i.e. 
by printing it to the terminal, displaying it in a GUI, or doing something else before discarding the data entirely. Therefore the Subscription instance serves a dual purpose as the true “last stage” of the pipeline. The argument to subscribe() is the content of the onNext handler. Since we called +and + +* ```subscribe()``` will generate a ```Subscription``` instance requesting ***all*** events that ```RemindersPublisher``` will ever produce. + +* Reactor framework propagates this ```Subscription``` instance backwards up the pipeline to the ```RemindersPublisher``` instance. + +* The ```RemindersPublisher``` instance reads this ```Subscription``` and responds by pushing an event into the pipeline every time there is a new reminder. The publisher will continue to push an event every time a reminder becomes available, until it has pushed as many events as were requested in the ```Subscription``` (which is infinity in this case, so the ```Publisher``` will just keep going.) + +When I say that the ```Publisher``` "pushes events into the pipeline", I mean that the ```Publisher``` issues an ```onNext``` signal to the second pipeline stage (```.flatMap(reminder -> “Don’t forget: ” + reminder)```) paired with a ```String``` argument containing the reminder. ```flatMap()``` responds to an ```onNext``` signal by taking the ```String``` data passed in and applying the transformation that is in ```flatMap()```'s argument parentheses to the input data (in this case, by prepending the words “Don’t forget: ”). This signal propagates down the pipeline: pipeline Stage 2 issues an ```onNext``` signal to pipeline Stage 3 (```.flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn)```) with its output as the argument; and then pipeline Stage 3 issues its own output along with an ```onNext``` signal. 
+ +Now what happens after next is different – the ```onNext``` signal reached the last pipeline stage, so what happens to final-stage ```onNext``` signal and its associated ```String``` argument? The answer is that when you called ```subscribe()```, ```subscribe()``` also created a ```Subscriber``` instance which implements a method for handling ```onNext``` signals and serves as the last stage of the pipeline. The ```Subscriber```'s ```onNext``` handler will call whatever code you wrote in the argument parentheses of ```subscribe()```, allowing you to customize for your application. In the Subscribe phase snippet above, we called ```java reminderPipeline.subscribe(System.out::println); //Async – returns immediately -``` +``` + +which means that every time an ```onNext``` signal reaches the end of the operation pipeline, the ```Subscriber``` will call ```System.out.println()``` on the reminder ```String``` associated with the event and print it to the terminal. -with System.out::println as the argument, now each time Subscription.onNext() is called the associated String output from the pipeline will be printed to the terminal using System.out::println. +In the TCP/IP example I touch on earlier, ```subscribe()``` is analogous to your program, and the rest of the pipeline is analogous to the TCP/IP request dependency which your program services on an on-availability basis. In ```subscribe()``` you typically want to handle the pipeline output with some finality, i.e. by printing it to the terminal, displaying it in a GUI, running a calculation on it, etc. or doing something else before discarding the data entirely. That said, Reactor does allow you can call ```subscribe()``` with no arguments and just discard incoming events and data - in that case you would implement all of the logic of your program in the preceding pipeline stages, including saving the results to a global variable or printing them to the terminal. That was a lot. 
So let’s step back for a moment and mention a few key points. -* Assembly phase is when you define a series of async operations and a dependency graph (pipeline) connecting them. -* Subscribe phase is when you actually execute that logic by creating a subscription. The Subscription doubles as the terminal pipeline stage. -* The async operation pipeline starts with a publisher, which will push events into the pipeline once it receives a Subscription for N events in the subscribe phase. -* Observe that a Subscription for N events is a type of pull operation, because one piece of code (Subscription) is calling for output from other code that is depends on (Publisher). Publisher controls the rate and timing of events, until it exhausts the N events requested by the Subscriber. -* So keep in mind that Reactor is following a hybrid push-pull model where async events are published at a rate requested by the subscriber. This enables the implementation of backpressure, whereby the subscriber can size subscriptions to adjust the rate of publisher events if they are coming too slow or too fast to process. -* subscribe() is Reactor’s built-in subscription generator, it +* Keep in mind that Reactor is following a hybrid push-pull model where async events are published at a rate requested by the subscriber. +* Observe that a ```Subscription``` for N events is a type of pull operation from the ```Subscriber```. The ```Publisher``` controls the rate and timing of pushing events, until it exhausts the N events requested by the ```Subscriber```, and then it stops +* This enables the implementation of ***backpressure***, whereby the ```Subscriber``` can size ```Subscription``` counts to adjust the rate of ```Publisher``` events if they are coming too slow or too fast to process. +* ```subscribe()``` is Reactor’s built-in subscription generator, by default it requests all events from the ```Publisher```. 
[See the Project Reactor documentation here](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) for more guidance on customizing the subscription process. ### 2. ```Flux```, ```Mono```, and ```subscribe()``` From 3ee6223b895ac7adcc8906ab88c6ac704ce6dff9 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 03:55:08 -0700 Subject: [PATCH 064/110] Full content for Reactor pattern guide --- reactor-pattern-guide.md | 73 ++++++++++++++++++++-------------------- 1 file changed, 36 insertions(+), 37 deletions(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 8f47d76..c4f632b 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -6,7 +6,7 @@ The purpose of this guide is to help you get started using Reactor-based Java SD ### 1. Reactive Programming and the Reactive Streams Standard -Reactive Programming is a declarative programming paradigm in which program operation and control flow are described as a stream of data items passing through a pipeline of operations. Each operation affects the data which flows downstream from it. Reactive Programming is a useful technique (through not the only technique) for event-driven asynchronous programming. +Reactive Programming is a declarative programming paradigm in which program operation and control flow are described as a stream of data items passing through a pipeline of operations. Each operation affects the data which flows downstream from it. Reactive Programming is a useful technique (though not the only technique) for event-driven asynchronous programming; it is an alternative to explicitly callback-based programming. **Imperative programming** is the more common or "familiar" programming paradigm in which program operation and control flow are expressed by sequential commands which manipulate program state (variables).
A simple imperative program in pseudocode is @@ -40,20 +40,20 @@ To write a program using Reactor, you will need to describe one or more async op 2. Describing each stage in the pipeline programmatically, in terms of how it processes data from the previous stage. -Reactor follows a "hybrid push-pull model": the ```Publisher``` pushes events and data into the pipeline as they are available, but ***only*** once you request events and data from the ```Publisher``` via a ```Subscription```. +Reactor follows a "hybrid push-pull model": the ```Publisher``` pushes events and data into the pipeline as they are available, but ***only*** once you request events and data from the ```Publisher``` by **subscribing**. -To put this in context, consider a "normal" non-Reactor program you might write that takes takes a dependency on some other code with unpredictable response time. For example, maybe you write a function to perform a calculation, and one input comes from calling a function that requests data over TCP/IP. You might deal with this by implementing a control flow which first calls the dependency code, waits for it to return output, and then provides that output to your code as input. So your code is “pulling” output from its dependency on an on-demand basis. This can be inefficient if there is latency in the dependency (as is the case for a TCP/IP request example); your code has to loop waiting for the dependency. +To put this in context, consider a "normal" non-Reactor program you might write that takes takes a dependency on some other code with unpredictable response time. For example, maybe you write a function to perform a calculation, and one input comes from calling a function that requests data over HTTP. You might deal with this by implementing a control flow which first calls the dependency code, waits for it to return output, and then provides that output to your code as input. So your code is “pulling” output from its dependency on an on-demand basis. 
This can be inefficient if there is latency in the dependency (as is the case for an HTTP request example); your code has to loop waiting for the dependency. -In a "push" model the dependency signals your code to consume the TCP/IP request response on an "on-availability" basis; the rest of the time, your code lies dormant, freeing up CPU cycles. This is an event-driven and async approach. But in order for the dependency to signal your code, ***the dependency has to know that your code depends on it*** – and that is the purpose of defining async operation pipelines in Reactor; each pipeline stage is really a piece of async code servicing events and data from the previous stage on an on-availability basis. By defining the pipeline, you tell each stage where to forward events and data to. +In a "push" model the dependency signals your code to consume the HTTP request response on an "on-availability" basis; the rest of the time, your code lies dormant, freeing up CPU cycles. This is an event-driven and async approach. But in order for the dependency to signal your code, ***the dependency has to know that your code depends on it*** – and that is the purpose of defining async operation pipelines in Reactor; each pipeline stage is really a piece of async code servicing events and data from the previous stage on an on-availability basis. By defining the pipeline, you tell each stage where to forward events and data to. -Now I will illustrate this with Reactor code examples. +Now I will illustrate this with Reactor code examples. Consider a Reminders app. The app's job is to create a message to the user every time there is a new reminder for them. To find out if there are new reminders for the user, the ```ReminderAsyncService``` running on the user's phone periodically sends HTTP requests to the Reminders server.
```ReminderAsyncService``` has a Reactive implementation in which ```ReminderAsyncService.getRemindersPublisher()``` returns a ```RemindersPublisher``` instance which listens for HTTP responses from the server and pushes the resulting reminders to a Reactive Stream defined by the user. ```RemindersPublisher``` extends the ```Publisher``` interface. **Assembly phase (define dependency relations as a pipeline)** ```java Flux reminderPipeline = -ReminderAsyncService.getRemindersPublisher() - .flatMap(reminder -> “Don’t forget: ” + reminder) - .flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn); // Nothing executed yet +ReminderAsyncService.getRemindersPublisher() // Pipeline Stage 1 + .flatMap(reminder -> “Don’t forget: ” + reminder) // Stage 2 + .flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn); // Stage 3 ``` **Subscribe phase (execute pipeline on incoming events)** @@ -69,11 +69,11 @@ In the **Assembly phase** shown above, you describe program logic as an async op * **Stage 1**: ```ReminderAsyncService.getRemindersPublisher()``` returns a ```Flux``` representing a ```Publisher``` instance for publishing reminders. -* **Stage 2**: ```.flatMap(reminder -> “Don’t forget: ” + reminder)``` modifies the ```Flux``` from **Stage 1** and returns an augmented ```Flux``` that represents a two-stage pipeline consisting of the reminders ```Publisher``` followed by the ```reminder -> “Don’t forget: ” + reminder``` operation which prepends "Don't forget: " to the ```reminder``` string (```reminder``` is a variable that can have any name and represents the previous stage output.) 
+* **Stage 2**: ```.flatMap(reminder -> “Don’t forget: ” + reminder)``` modifies the ```Flux``` from **Stage 1** and returns an augmented ```Flux``` that represents a two-stage pipeline consisting of the ```RemindersPublisher``` followed by the ```reminder -> “Don’t forget: ” + reminder``` operation which prepends "Don't forget: " to the ```reminder``` string (```reminder``` is a variable that can have any name and represents the previous stage output.) -* **Stage 3**: ```.flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn)``` modifies the ```Flux``` from **Stage 2** and returns a further-augmented ```Flux``` that represents a three-stage pipeline consisting of the reminders ```Publisher```, the **Stage 2** operation, and finally the ```strIn -> LocalDateTime.now().toString() + “: ”+ strIn``` operation, which timestamps the **Stage 2** output string. So hypothetically if you defined an async operation pipeline which ate int’s and spat out Strings, the output of the assembly phase would be a Flux representing the pipeline. - -In the **Subscribe phase** you execute the pipeline that you defined in the Assembly phase. Here is how that works. You call +* **Stage 3**: ```.flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn)``` modifies the ```Flux``` from **Stage 2** and returns a further-augmented ```Flux``` that represents a three-stage pipeline consisting of the ```RemindersPublisher```, the **Stage 2** operation, and finally the ```strIn -> LocalDateTime.now().toString() + “: ”+ strIn``` operation, which timestamps the **Stage 2** output string. So hypothetically if you defined an async operation pipeline which ate int’s and spat out Strings, the output of the assembly phase would be a Flux representing the pipeline. + +Although we "ran" the Assembly phase code, all it did was build up the structure of your program, not run it. In the **Subscribe phase** you execute the pipeline that you defined in the Assembly phase. Here is how that works. 
You call ```java reminderPipeline.subscribe(System.out::println); //Async – returns immediately @@ -81,15 +81,15 @@ reminderPipeline.subscribe(System.out::println); //Async – returns immediately and -* ```subscribe()``` will generate a ```Subscription``` instance requesting ***all*** events that ```RemindersPublisher``` will ever produce. +* ```subscribe()``` will generate a ```Subscription``` instance containing an unbounded request for ***all*** events that ```RemindersPublisher``` will ever produce. -* Reactor framework propagates this ```Subscription``` instance backwards up the pipeline to the ```RemindersPublisher``` instance. +* Reactor framework propagates the ```Subscription``` info up the pipeline to the ```RemindersPublisher``` instance. -* The ```RemindersPublisher``` instance reads this ```Subscription``` and responds by pushing an event into the pipeline every time there is a new reminder. The publisher will continue to push an event every time a reminder becomes available, until it has pushed as many events as were requested in the ```Subscription``` (which is infinity in this case, so the ```Publisher``` will just keep going.) +* The ```RemindersPublisher``` instance reads the ```Subscription``` details and responds by pushing an event into the pipeline every time there is a new reminder. The ```RemindersPublisher``` will continue to push an event every time a reminder becomes available, until it has pushed as many events as were requested in the ```Subscription``` (which is infinity in this case, so the ```Publisher``` will just keep going.) -When I say that the ```Publisher``` "pushes events into the pipeline", I mean that the ```Publisher``` issues an ```onNext``` signal to the second pipeline stage (```.flatMap(reminder -> “Don’t forget: ” + reminder)```) paired with a ```String``` argument containing the reminder. 
```flatMap()``` responds to an ```onNext``` signal by taking the ```String``` data passed in and applying the transformation that is in ```flatMap()```'s argument parentheses to the input data (in this case, by prepending the words “Don’t forget: ”). This signal propagates down the pipeline: pipeline Stage 2 issues an ```onNext``` signal to pipeline Stage 3 (```.flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn)```) with its output as the argument; and then pipeline Stage 3 issues its own output along with an ```onNext``` signal. +When I say that the ```RemindersPublisher``` "pushes events into the pipeline", I mean that the ```RemindersPublisher``` issues an ```onNext``` signal to the second pipeline stage (```.flatMap(reminder -> “Don’t forget: ” + reminder)```) paired with a ```String``` argument containing the reminder. ```flatMap()``` responds to an ```onNext``` signal by taking the ```String``` data passed in and applying the transformation that is in ```flatMap()```'s argument parentheses to the input data (in this case, by prepending the words “Don’t forget: ”). This signal propagates down the pipeline: pipeline Stage 2 issues an ```onNext``` signal to pipeline Stage 3 (```.flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn)```) with its output as the argument; and then pipeline Stage 3 issues its own output along with an ```onNext``` signal. -Now what happens after next is different – the ```onNext``` signal reached the last pipeline stage, so what happens to final-stage ```onNext``` signal and its associated ```String``` argument? The answer is that when you called ```subscribe()```, ```subscribe()``` also created a ```Subscriber``` instance which implements a method for handling ```onNext``` signals and serves as the last stage of the pipeline. The ```Subscriber```'s ```onNext``` handler will call whatever code you wrote in the argument parentheses of ```subscribe()```, allowing you to customize for your application. 
In the Subscribe phase snippet above, we called +Now what happens after pipeline Stage 3 is different – the ```onNext``` signal reached the last pipeline stage, so what happens to the final-stage ```onNext``` signal and its associated ```String``` argument? The answer is that when you called ```subscribe()```, ```subscribe()``` also created a ```Subscriber``` instance which implements a method for handling ```onNext``` signals and serves as the last stage of the pipeline. The ```Subscriber```'s ```onNext``` handler will call whatever code you wrote in the argument parentheses of ```subscribe()```, allowing you to customize for your application. In the Subscribe phase snippet above, we called ```java reminderPipeline.subscribe(System.out::println); //Async – returns immediately @@ -97,17 +97,17 @@ reminderPipeline.subscribe(System.out::println); //Async – returns immediately which means that every time an ```onNext``` signal reaches the end of the operation pipeline, the ```Subscriber``` will call ```System.out.println()``` on the reminder ```String``` associated with the event and print it to the terminal. -In the TCP/IP example I touch on earlier, ```subscribe()``` is analogous to your program, and the rest of the pipeline is analogous to the TCP/IP request dependency which your program services on an on-availability basis. In ```subscribe()``` you typically want to handle the pipeline output with some finality, i.e. by printing it to the terminal, displaying it in a GUI, running a calculation on it, etc. or doing something else before discarding the data entirely. That said, Reactor does allow you can call ```subscribe()``` with no arguments and just discard incoming events and data - in that case you would implement all of the logic of your program in the preceding pipeline stages, including saving the results to a global variable or printing them to the terminal. 
+In the HTTP example I touch on earlier, ```subscribe()``` is analogous to your program, and the rest of the pipeline is analogous to the HTTP request dependency which your program services on an on-availability basis. In ```subscribe()``` you typically want to handle the pipeline output with some finality, e.g. by printing it to the terminal, displaying it in a GUI, running a calculation on it, etc. or doing something else before discarding the data entirely. That said, Reactor does allow you to call ```subscribe()``` with no arguments and just discard incoming events and data - in that case you would implement all of the logic of your program in the preceding pipeline stages, including saving the results to a global variable or printing them to the terminal. That was a lot. So let’s step back for a moment and mention a few key points. * Keep in mind that Reactor is following a hybrid push-pull model where async events are published at a rate requested by the subscriber. * Observe that a ```Subscription``` for N events is a type of pull operation from the ```Subscriber```. The ```Publisher``` controls the rate and timing of pushing events, until it exhausts the N events requested by the ```Subscriber```, and then it stops * This enables the implementation of ***backpressure***, whereby the ```Subscriber``` can size ```Subscription``` counts to adjust the rate of ```Publisher``` events if they are coming too slow or too fast to process. -* ```subscribe()``` is Reactor’s built-in subscription generator, by default it requests all events from the ```Publisher```. [See the Project Reactor documentation here](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) for more guidance on customizing the subscription process. +* ```subscribe()``` is Reactor’s built-in ```Subscription``` generator, by default it requests all events from the ```Publisher``` ("unbounded request").
[See the Project Reactor documentation here](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) for more guidance on customizing the subscription process. ### 2. ```Flux```, ```Mono```, and ```subscribe()``` -Flux supports publishers with 0, 1, or N events, where N can be finite or infinite. The assembly stage for a publisher with N=3 events is shown below +The ```Subscriber``` and ```Publisher``` are independent entities; just because the ```Subscriber``` subscribes to N events doesn't mean the ```Publisher``` has them available. ```Flux``` supports ```Publisher```s with 0, 1, or M events, where M can be finite or unbounded. The Assembly stage for a publisher with M=3 events is shown below ```java Flux reminderPipeline = @@ -116,13 +116,15 @@ Flux reminderPipeline = .flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn); // Nothing executed yet ``` -And upon subscription, +```Flux.just()``` is a [Reactor factory method](https://projectreactor.io/docs/core/release/reference/) which contrives to create a custom ```Publisher``` based on its input arguments. You could fully customize your ```Publisher``` implementation by writing a class that implements ```Publisher```; that is outside the scope of this discussion. The output of ```Flux.just()``` in the example above is a ```Publisher``` which will immediately and asynchronously push ```"Wash the dishes"```, ```"Mow the lawn"```, and ```"Sleep"``` into the pipeline as soon as it gets a ```Subscription```. Thus, upon subscription, ```java reminderPipeline.subscribe(System.out::println); ``` -will output the three Strings shown (corresponding to three publisher events pushed into the pipeline) and then stop. Suppose now we want to add two special behaviors to our program: (1) After all N Strings have been printed, print “End of reminders.” so the user knows we are finished. (2) Print any exceptions which occur during execution. 
A modification to the subscribe() call handles all of this: +will output the three Strings shown and then end. + +Suppose now we want to add two special behaviors to our program: (1) After all M Strings have been printed, print “End of reminders.” so the user knows we are finished. (2) Print the stack trace for any ```Exception```s which occur during execution. A modification to the ```subscribe()``` call handles all of this: ```java reminderPipeline.subscribe(strIn -> { @@ -136,27 +138,24 @@ err -> { }); ``` -Let’s break this down. Remember we said that the argument to subscribe() determines how it handles incoming signals such as onNext? Reactor actually has three important signals which propagate state information along the pipeline: onNext, onComplete, and onError. As shown below, we just modified the subscribe() argument to handle all three: +Let’s break this down. Remember we said that the argument to ```subscribe()``` determines how the ```Subscriber``` handles ```onNext```? There are two additional signals which Reactor uses to propagate events along the pipeline (and others we won't touch on here!): ```onComplete```, and ```onError```. Both signals denote completion of the Stream; only ```onComplete``` represents successful completion. The ```onError``` signal is associated with an ```Exception``` instance related to an error; the ```onComplete``` signal has no associated data. -```java -reminderPipeline.subscribe(strIn -> { - System.out.println(strIn); -}, -err -> { - err.printStackTrace(); -}, -() -> { - System.out.println(“End of reminders.”); -}); -``` +As it turns out, we can supply additional code to ```subscribe()``` in the form of Java 8 lambdas and handle ```onComplete``` and ```onError``` as well as ```onNext```! Picking apart the code snippet above, + +* ```strIn -> {...}``` defines a lambda for handling ```onNext```, where ```strIn``` is the associated data item for each signal (the name ```strIn``` is my choice, any variable name will do). 
+* ```err -> {...}``` defines a lambda for handling ```onError```, where ```err``` is the ```Exception```. +* ```() -> {...}``` defines a lambda for handling ```onComplete```, and notice there is no data associated (empty parentheses). The ```Publisher``` will issue ```onComplete``` when it has exhausted all events that it was created to issue. -Mono example +For the special cases of M=0 and M=1 for the ```Publisher```, Reactor provides a special-purpose ```Mono``` class for representing the async operation pipeline. ```java -Mono exampleMono = Mono +Mono reminderPipeline = + Mono.just("Are you sure you want to cancel your Reminders service?") // Publisher, 1 event + .flatMap(reminder -> “Act now: ” + reminder) + .flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn); ``` -### 3. A selection of useful Reactor operations +Again, ```Mono.just()``` is a Reactor factory method which creates the single-event publisher. This ```Publisher``` will push its argument into the Reactive Stream pipeline with an ```onNext``` signal and then issue an ```onComplete``` signal indicating completion. ## For More Information From 8161813cd6bcef25303806b38355a5763f1c9723 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 03:56:42 -0700 Subject: [PATCH 065/110] Minor fix --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index c4f632b..d03264f 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -20,7 +20,7 @@ Specifically, Reactive Programming is a **declarative dataflow** paradigm - the asynchronous data source => operation1 => operation2 => operation3 => print -How this differs from imperative programming, is that the coder is describing the high-level process of execution but letting the language implementation decide when and how to implement these operations.
This is exemplified by the concept of *back-pressure* which is baked into some implementations of Reactive Programming. Back-pressure essentially rate-limits dataflow in a Reactive Stream based on what the recipient of the data can handle. An imperative implementation of back-pressure would require the programmer to describe a complicated flow-control process for each async operation to respond to events. In a declarative dataflow language with back-pressure, the programmer specifies the directed graph of pipelined operations while the language handles scheduling of operations at the implementation level to ensure that no operation receives data faster than it can process. +How this differs from imperative programming, is that the coder is describing the high-level process of execution but letting the language implementation decide when and how to implement these operations. This is exemplified by the concept of *back-pressure* which is baked into some implementations of Reactive Programming. Back-pressure essentially rate-limits dataflow in a Reactive Stream based on what the recipient of the data can handle. An imperative implementation of back-pressure would require the programmer to describe a complicated flow-control process for each async operation to respond to events. In a declarative dataflow language with back-pressure, the programmer specifies the directed graph of pipelined operations while the language handles scheduling of operations at the implementation level. [Reactive Streams](http://www.reactive-streams.org/) is an industry standard for declarative dataflow programming in an asynchronous environment. More detail on design principles can be found in the [Reactive Manifesto](https://www.reactivemanifesto.org/). It is the basis for new Azure's new async SDKs. 
From 4c7f00742f2d4a7664d84e3d4b89393bd199ecef Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 03:57:44 -0700 Subject: [PATCH 066/110] Minor fix --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index d03264f..79f161a 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -22,7 +22,7 @@ Specifically, Reactive Programming is a **declarative dataflow** paradigm - the How this differs from imperative programming, is that the coder is describing the high-level process of execution but letting the language implementation decide when and how to implement these operations. This is exemplified by the concept of *back-pressure* which is baked into some implementations of Reactive Programming. Back-pressure essentially rate-limits dataflow in a Reactive Stream based on what the recipient of the data can handle. An imperative implementation of back-pressure would require the programmer to describe a complicated flow-control process for each async operation to respond to events. In a declarative dataflow language with back-pressure, the programmer specifies the directed graph of pipelined operations while the language handles scheduling of operations at the implementation level. -[Reactive Streams](http://www.reactive-streams.org/) is an industry standard for declarative dataflow programming in an asynchronous environment. More detail on design principles can be found in the [Reactive Manifesto](https://www.reactivemanifesto.org/). It is the basis for new Azure's new async SDKs. +[Reactive Streams](http://www.reactive-streams.org/) is an industry standard for declarative dataflow programming in an asynchronous environment. More detail on design principles can be found in the [Reactive Manifesto](https://www.reactivemanifesto.org/). It is the basis for Azure's async Java SDKs going forward. ### 2. 
Reactive Streams Frameworks for Java/JVM From 0bb8cd393206469d5680569e94aa1aaeaa357796 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 03:59:23 -0700 Subject: [PATCH 067/110] Minor fix --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 79f161a..04224ca 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -42,7 +42,7 @@ To write a program using Reactor, you will need to describe one or more async op Reactor follows a "hybrid push-pull model": the ```Publisher``` pushes events and data into the pipeline as they are available, but ***only*** once you request events and data from the ```Publisher``` by **subscribing**. -To put this in context, consider a "normal" non-Reactor program you might write that takes takes a dependency on some other code with unpredictable response time. For example, maybe you write a function to perform a calculation, and one input comes from calling a function that requests data over HTTP. You might deal with this by implementing a control flow which first calls the dependency code, waits for it to return output, and then provides that output to your code as input. So your code is “pulling” output from its dependency on an on-demand basis. This can be inefficient if there is latency in the dependency (as is the case for a HTTP request example); your code has to loop waiting for the dependency. +To put this in context, consider a "normal" non-Reactor program you might write that takes a dependency on some other code with unpredictable response time. For example, maybe you write a function to perform a calculation, and one input comes from calling a function that requests data over HTTP. You might deal with this by implementing a control flow which first calls the dependency code, waits for it to return output, and then provides that output to your code as input. 
So your code is “pulling” output from its dependency on an on-demand basis. This can be inefficient if there is latency in the dependency (as is the case for the aforementioned HTTP request example); your code has to loop waiting for the dependency. In a "push" model the dependency signals your code to consume the HTTP request response on an "on-availability" basis; the rest of the time, your code lies dormant, freeing up CPU cycles. This is an event-driven and async approach. But in order for the dependency to signal your code, ***the dependency has to know that your code depends on it*** – and that is the purpose of defining async operation pipelines in Reactor; each pipeline stage is really a piece of async code servicing events and data from the previous stage on an on-availability basis. By defining the pipeline, you tell each stage where to forward events and data to. From 4553f693ff5ee99b6d23fd4b2eae2e50ee3d49b6 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 04:06:20 -0700 Subject: [PATCH 068/110] Clarity --- reactor-pattern-guide.md | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 04224ca..26e254b 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -63,15 +63,20 @@ reminderPipeline.subscribe(System.out::println); // Async – returns immediatel while (true) doOtherThings(); // We’re freed up to do other tasks 😊 ``` -The ```Flux``` class internally represents an async operation pipeline as a DAG and provides instance methods for operating on the pipeline. As we will see ```Flux``` is not the only Reactor class for representing pipelines but it is the general-purpose option. +The ```Flux``` class internally represents an async operation pipeline as a DAG and provides instance methods for operating on the pipeline. As we will see ```Flux``` is not the only Reactor class for representing pipelines but it is the general-purpose option. 
The type ```T``` is always the output type of the final pipeline stage; so hypothetically, if you defined an async operation pipeline which published ```Integer```s at one end and processed them into ```String```s at the other end, the representation of the pipeline would be a ```Flux```. In the **Assembly phase** shown above, you describe program logic as an async operation pipeline (a ```Flux```), but don't actually execute it just yet. Let's break down how the async operation pipeline is built in the **Assembly phase** snippet above: * **Stage 1**: ```ReminderAsyncService.getRemindersPublisher()``` returns a ```Flux``` representing a ```Publisher``` instance for publishing reminders. -* **Stage 2**: ```.flatMap(reminder -> “Don’t forget: ” + reminder)``` modifies the ```Flux``` from **Stage 1** and returns an augmented ```Flux``` that represents a two-stage pipeline consisting of the ```RemindersPublisher``` followed by the ```reminder -> “Don’t forget: ” + reminder``` operation which prepends "Don't forget: " to the ```reminder``` string (```reminder``` is a variable that can have any name and represents the previous stage output.) +* **Stage 2**: ```.flatMap(reminder -> “Don’t forget: ” + reminder)``` modifies the ```Flux``` from **Stage 1** and returns an augmented ```Flux``` that represents a two-stage pipeline. The pipeline consists of + * the ```RemindersPublisher```, followed by + * the ```reminder -> “Don’t forget: ” + reminder``` operation which prepends "Don't forget: " to the ```reminder``` string (```reminder``` is a variable that can have any name and represents the previous stage output.) 
-* **Stage 3**: ```.flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn)``` modifies the ```Flux``` from **Stage 2** and returns a further-augmented ```Flux``` that represents a three-stage pipeline consisting of the ```RemindersPublisher```, the **Stage 2** operation, and finally the ```strIn -> LocalDateTime.now().toString() + “: ”+ strIn``` operation, which timestamps the **Stage 2** output string. So hypothetically if you defined an async operation pipeline which ate int’s and spat out Strings, the output of the assembly phase would be a Flux representing the pipeline. +* **Stage 3**: ```.flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn)``` modifies the ```Flux``` from **Stage 2** and returns a further-augmented ```Flux``` that represents a three-stage pipeline. The pipeline consists of + * the ```RemindersPublisher```, + * the **Stage 2** operation, and finally + * the ```strIn -> LocalDateTime.now().toString() + “: ”+ strIn``` operation, which timestamps the **Stage 2** output string. Although we "ran" the Assembly phase code, all it did was build up the structure of your program, not run it. In the **Subscribe phase** you execute the pipeline that you defined in the Assembly phase. Here is how that works. You call From 552a6dc91a94c585729b3740d23412bd3d4d7e66 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 04:11:58 -0700 Subject: [PATCH 069/110] Fixes --- reactor-pattern-guide.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 26e254b..416b5a5 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -105,10 +105,12 @@ which means that every time an ```onNext``` signal reaches the end of the operat In the HTTP example I touch on earlier, ```subscribe()``` is analogous to your program, and the rest of the pipeline is analogous to the HTTP request dependency which your program services on an on-availability basis. 
In ```subscribe()``` you typically want to handle the pipeline output with some finality, i.e. by printing it to the terminal, displaying it in a GUI, running a calculation on it, etc. or doing something else before discarding the data entirely. That said, Reactor does allow you to call ```subscribe()``` with no arguments and just discard incoming events and data - in that case you would implement all of the logic of your program in the preceding pipeline stages, including saving the results to a global variable or printing them to the terminal. That was a lot. So let’s step back for a moment and mention a few key points. -* Keep in mind that Reactor is following a hybrid push-pull model where async events are published at a rate requested by the subscriber. +* Keep in mind that Reactor is following a hybrid push-pull model where async events are published at a rate requested by the ```Subscriber```. * Observe that a ```Subscription``` for N events is a type of pull operation from the ```Subscriber```. The ```Publisher``` controls the rate and timing of pushing events, until it exhausts the N events requested by the ```Subscriber```, and then it stops * This enables the implementation of ***backpressure***, whereby the ```Subscriber``` can size ```Subscription``` counts to adjust the rate of ```Publisher``` events if they are coming too slow or too fast to process. -* ```subscribe()``` is Reactor’s built-in ```Subscription generator```, by default it requests all events from the ```Publisher``` ("unbounded request".) [See the Project Reactor documentation here](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) for more guidance on customizing the subscription process. +* ```subscribe()``` is Reactor’s built-in ```Subscription``` generator, by default it requests all events from the ```Publisher``` ("unbounded request".) 
[See the Project Reactor documentation here](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) for more guidance on customizing the subscription process. + +And the most important takeaway: **Nothing happens until you subscribe.** ### 2. ```Flux```, ```Mono```, and ```subscribe()``` From 416e85c60d2f1a7ff67856f66ee9d21c77ba5b92 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 04:16:27 -0700 Subject: [PATCH 070/110] RxJava --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 416b5a5..f2ff5a1 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -170,4 +170,4 @@ Again, ```Mono.just()``` is a Reactor factory method which creates the single-ev * [A gentle introduction to Reactor from tech.io](https://tech.io/playgrounds/929/reactive-programming-with-reactor-3/Intro) -* Reactive Extensions for the JVM (RxJava), a project of ReactiveX **which is no longer used by Cosmos DB** but was previously used to facilitate non-blocking access in Async Java SDK v2.x.x and below. +* [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](reactivex.io/) for JVM), a project of ReactiveX **which is no longer used in new Azure SDKs** From a9337afb3fa39c3aaee1d282ad04cb2fdec6dbd5 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 04:17:53 -0700 Subject: [PATCH 071/110] URL fix --- reactor-pattern-guide.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index f2ff5a1..53f5dd1 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -26,7 +26,7 @@ How this differs from imperative programming, is that the coder is describing th ### 2. Reactive Streams Frameworks for Java/JVM -Reactive Streams frameworks implement the Reactive Streams Standard for specific programming languages. 
[RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](reactivex.io/) for JVM) was the basis of past Azure Java SDKs, but will not be going forward. +Reactive Streams frameworks implement the Reactive Streams Standard for specific programming languages. [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](https://reactivex.io/) for JVM) was the basis of past Azure Java SDKs, but will not be going forward. [Project Reactor](https://projectreactor.io/) or just *Reactor* is the Reactive Programming framework being used for new Azure Java SDKs. The purpose of the rest of this document is to help you get started with Reactor. @@ -170,4 +170,4 @@ Again, ```Mono.just()``` is a Reactor factory method which creates the single-ev * [A gentle introduction to Reactor from tech.io](https://tech.io/playgrounds/929/reactive-programming-with-reactor-3/Intro) -* [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](reactivex.io/) for JVM), a project of ReactiveX **which is no longer used in new Azure SDKs** +* [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](https://reactivex.io/) for JVM), a project of ReactiveX **which is no longer used in new Azure SDKs** From c4e13fb10b769a28c9f9af0c86052a9d629b1160 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 04:18:49 -0700 Subject: [PATCH 072/110] https -> http for ReactiveX link --- reactor-pattern-guide.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 53f5dd1..a4374af 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -26,7 +26,7 @@ How this differs from imperative programming, is that the coder is describing th ### 2. Reactive Streams Frameworks for Java/JVM -Reactive Streams frameworks implement the Reactive Streams Standard for specific programming languages. 
[RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](https://reactivex.io/) for JVM) was the basis of past Azure Java SDKs, but will not be going forward. +Reactive Streams frameworks implement the Reactive Streams Standard for specific programming languages. [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](http://reactivex.io/) for JVM) was the basis of past Azure Java SDKs, but will not be going forward. [Project Reactor](https://projectreactor.io/) or just *Reactor* is the Reactive Programming framework being used for new Azure Java SDKs. The purpose of the rest of this document is to help you get started with Reactor. @@ -170,4 +170,4 @@ Again, ```Mono.just()``` is a Reactor factory method which creates the single-ev * [A gentle introduction to Reactor from tech.io](https://tech.io/playgrounds/929/reactive-programming-with-reactor-3/Intro) -* [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](https://reactivex.io/) for JVM), a project of ReactiveX **which is no longer used in new Azure SDKs** +* [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](http://reactivex.io/) for JVM), a project of ReactiveX **which is no longer used in new Azure SDKs** From 4f59853f8b6dace3b1110ac198637121a0f0e938 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 05:44:23 -0700 Subject: [PATCH 073/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index a4374af..4f7893c 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -1,6 +1,6 @@ # Reactor Pattern Guide -The purpose of this guide is to help you get started using Reactor-based Java SDKs by understanding Reactor-based design patterns. It is recommended to read the [Project Reactor](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) documentation if you want to learn more. 
+The purpose of this guide is to help you get started using Reactor-based Java SDKs by understanding basic design patterns for the Reactor framework. The [Project Reactor](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) website has further documentation if you want to learn more. ## Background From 33d5759d681fb681730a3d2496161d52daf90dc4 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 05:45:12 -0700 Subject: [PATCH 074/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 4f7893c..b96ba39 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -6,7 +6,7 @@ The purpose of this guide is to help you get started using Reactor-based Java SD ### 1. Reactive Programming and the Reactive Streams Standard -Reactive Programming is a declarative programming paradigm in which program operation and control flow are described as a stream of data items passing through a pipeline of operations. Each operation affects the data which flows downstream from it. Reactive Programming is a useful technique (through not the only technique) for event-driven asynchronous programming; it is an alternative to explicitly callback-based programming. +Reactive Programming is a declarative programming paradigm in which program operation and control flow are described as a stream of events and data passing through a pipeline of operations. Each operation affects the data which flows downstream from it. Reactive Programming is a useful technique (through not the only technique) for event-driven asynchronous programming; it is an alternative to explicitly callback-based programming. **Imperative programming** is the more common or "familiar" programming paradigm in which program operation and control flow are expressed by sequential commands which manipulate program state (variables). 
A simple imperative program in pseudocode is From 6e5d02eef526a8715f04ed4dae2a7172b20e71b7 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 05:45:47 -0700 Subject: [PATCH 075/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index b96ba39..99a5fc6 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -6,7 +6,7 @@ The purpose of this guide is to help you get started using Reactor-based Java SD ### 1. Reactive Programming and the Reactive Streams Standard -Reactive Programming is a declarative programming paradigm in which program operation and control flow are described as a stream of events and data passing through a pipeline of operations. Each operation affects the data which flows downstream from it. Reactive Programming is a useful technique (through not the only technique) for event-driven asynchronous programming; it is an alternative to explicitly callback-based programming. +Reactive Programming is a declarative programming paradigm in which program operation and control flow are described as a stream of events and data passing through a pipeline of operations. Each operation affects the data which flows downstream from it. Reactive Programming is a useful technique (though not the only technique) for event-driven asynchronous programming; for example it is an alternative to explicitly callback-based programming. **Imperative programming** is the more common or "familiar" programming paradigm in which program operation and control flow are expressed by sequential commands which manipulate program state (variables). 
A simple imperative program in pseudocode is From 693a2ae7c46e56cfcf4a971b086b606e242b4272 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 05:50:26 -0700 Subject: [PATCH 076/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 99a5fc6..c1e9459 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -46,7 +46,7 @@ To put this in context, consider a "normal" non-Reactor program you might write In a "push" model the dependency signals your code to consume the HTTP request response on an "on-availability" basis; the rest of the time, your code lies dormant, freeing up CPU cycles. This is an event-driven and async approach. But in order for the dependency to signal your code, ***the dependency has to know that your code depends on it*** – and that is the purpose of defining async operation pipelines in Reactor; each pipeline stage is really a piece of async code servicing events and data from the previous stage on an on-availability basis. By defining the pipeline, you tell each stage where to forward events and data to. -Now I will illustrate this with Reactor code examples. Consider a Reminders app. The app's job is a create a message to the user every time there is a new reminder for them. To find out if there are new reminders for the user, the ```ReminderAsyncService``` running on the user's phone periodically sends HTTP requests to the Reminders server. ```ReminderAsyncService``` has a Reactive implementation in which ```ReminderAsyncService.getRemindersPublisher()``` returns a ```RemindersPublisher``` instance which listens for HTTP responses from the server and pushes the resulting reminders to a Reactive Stream defined by the user. ```RemindersPublisher``` extends the ```Publisher``` interface. +Now I will illustrate this with Reactor code examples. Consider a Reminders app. 
The app's job is to create a message to the user every time there is a new reminder for them. To find out if there are new reminders for the user, the ```ReminderAsyncService``` running on the user's smartphone periodically sends HTTP requests to the Reminders server. ```ReminderAsyncService``` has a Reactive implementation in which ```ReminderAsyncService.getRemindersPublisher()``` returns a ```RemindersPublisher``` instance which listens for HTTP responses from the server. When a response arrives, the ```RemindersPublisher``` pushes the resulting reminders to a Reactive Stream within the smartphone app. ```RemindersPublisher``` extends the ```Publisher``` interface. **Assembly phase (define dependency relations as a pipeline)** ```java From e46772ee6e35d42849f14608a66afea1efd95943 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 05:51:07 -0700 Subject: [PATCH 077/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index c1e9459..129010c 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -36,7 +36,9 @@ Reactive Streams frameworks implement the Reactive Streams Standard for specific To write a program using Reactor, you will need to describe one or more async operation pipelines for processing Reactive Streams. In typical uses of Reactor, you describe a pipeline by -1. Creating a ```Publisher``` (which pushes events and data into the pipeline asynchronously) and a ```Subscriber``` (which consumes events and data from the pipeline and operates on them asynchronously) +1. Creating a ```Publisher``` (which pushes events and data into the pipeline asynchronously) and a ```Subscriber``` (which consumes events and data from the pipeline and operates on them asynchronously). These are both interfaces defined by Reactor. + +and 2. 
Describing each stage in the pipeline programmatically, in terms of how it processes data from the previous stage. From 2a29cc0efd4922729e7785958ff9d6241c90993a Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 05:52:42 -0700 Subject: [PATCH 078/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 129010c..5411db2 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -36,11 +36,11 @@ Reactive Streams frameworks implement the Reactive Streams Standard for specific To write a program using Reactor, you will need to describe one or more async operation pipelines for processing Reactive Streams. In typical uses of Reactor, you describe a pipeline by -1. Creating a ```Publisher``` (which pushes events and data into the pipeline asynchronously) and a ```Subscriber``` (which consumes events and data from the pipeline and operates on them asynchronously). These are both interfaces defined by Reactor. +1. creating a ```Publisher``` (which pushes events and data into the pipeline asynchronously) and a ```Subscriber``` (which consumes events and data from the pipeline and operates on them asynchronously), and -and +2. describing each stage in the pipeline programmatically, in terms of how it processes data from the previous stage. -2. Describing each stage in the pipeline programmatically, in terms of how it processes data from the previous stage. +```Publisher``` and ```Subscriber``` are both interfaces defined by Reactor. Reactor follows a "hybrid push-pull model": the ```Publisher``` pushes events and data into the pipeline as they are available, but ***only*** once you request events and data from the ```Publisher``` by **subscribing**. 
From 261c870858ea828add2a612595d2f968004abf72 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 05:56:55 -0700 Subject: [PATCH 079/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 5411db2..6be164b 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -104,7 +104,7 @@ reminderPipeline.subscribe(System.out::println); //Async – returns immediately which means that every time an ```onNext``` signal reaches the end of the operation pipeline, the ```Subscriber``` will call ```System.out.println()``` on the reminder ```String``` associated with the event and print it to the terminal. -In the HTTP example I touch on earlier, ```subscribe()``` is analogous to your program, and the rest of the pipeline is analogous to the HTTP request dependency which your program services on an on-availability basis. In ```subscribe()``` you typically want to handle the pipeline output with some finality, i.e. by printing it to the terminal, displaying it in a GUI, running a calculation on it, etc. or doing something else before discarding the data entirely. That said, Reactor does allow you to call ```subscribe()``` with no arguments and just discard incoming events and data - in that case you would implement all of the logic of your program in the preceding pipeline stages, including saving the results to a global variable or printing them to the terminal. +In ```subscribe()``` you typically want to handle the pipeline output with some finality, i.e. by printing it to the terminal, displaying it in a GUI, running a calculation on it, etc. or doing something else before discarding the data entirely. 
That said, Reactor does allow you to call ```subscribe()``` with no arguments and just discard incoming events and data - in that case you would implement all of the logic of your program in the preceding pipeline stages, including saving the results to a global variable or printing them to the terminal. That was a lot. So let’s step back for a moment and mention a few key points. * Keep in mind that Reactor is following a hybrid push-pull model where async events are published at a rate requested by the ```Subscriber```. From 04ff182d049e4ec0066787b3d163307849968f5d Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 05:58:53 -0700 Subject: [PATCH 080/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 6be164b..443864c 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -108,8 +108,8 @@ In ```subscribe()``` you typically want to handle the pipeline output with some That was a lot. So let’s step back for a moment and mention a few key points. * Keep in mind that Reactor is following a hybrid push-pull model where async events are published at a rate requested by the ```Subscriber```. -* Observe that a ```Subscription``` for N events is a type of pull operation from the ```Subscriber```. The ```Publisher``` controls the rate and timing of pushing events, until it exhausts the N events requested by the ```Subscriber```, and then it stops -* This enables the implementation of ***backpressure***, whereby the ```Subscriber``` can size ```Subscription``` counts to adjust the rate of ```Publisher``` events if they are coming too slow or too fast to process. +* Observe that a ```Subscription``` for N events is a type of pull operation from the ```Subscriber```. 
The ```Publisher``` controls the rate and timing of pushing events, until it exhausts the N events requested by the ```Subscriber```, and then it stops. This is the "hybrid push-pull" approach. +* This approach enables the implementation of ***backpressure***, whereby the ```Subscriber``` can size ```Subscription``` counts to adjust the rate of ```Publisher``` events if they are coming too slow or too fast to process. * ```subscribe()``` is Reactor’s built-in ```Subscription``` generator, by default it requests all events from the ```Publisher``` ("unbounded request".) [See the Project Reactor documentation here](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) for more guidance on customizing the subscription process. And the most important takeaway: **Nothing happens until you subscribe.** From d2acbcc88f6c27639d03f6c6a31b751357de3d34 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 05:59:36 -0700 Subject: [PATCH 081/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 443864c..d0bb1d0 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -108,7 +108,7 @@ In ```subscribe()``` you typically want to handle the pipeline output with some That was a lot. So let’s step back for a moment and mention a few key points. * Keep in mind that Reactor is following a hybrid push-pull model where async events are published at a rate requested by the ```Subscriber```. -* Observe that a ```Subscription``` for N events is a type of pull operation from the ```Subscriber```. The ```Publisher``` controls the rate and timing of pushing events, until it exhausts the N events requested by the ```Subscriber```, and then it stops. This is the "hybrid push-pull" approach. +* Observe that a ```Subscription``` for N events is a type of pull operation from the ```Subscriber```. 
The ```Publisher``` controls the rate and timing of pushing events, until it exhausts the N events requested by the ```Subscriber```, and then it stops. * This approach enables the implementation of ***backpressure***, whereby the ```Subscriber``` can size ```Subscription``` counts to adjust the rate of ```Publisher``` events if they are coming too slow or too fast to process. * ```subscribe()``` is Reactor’s built-in ```Subscription``` generator, by default it requests all events from the ```Publisher``` ("unbounded request".) [See the Project Reactor documentation here](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) for more guidance on customizing the subscription process. From ff6c444cf8a79deca9f3396c88acb8be3434b865 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 06:02:03 -0700 Subject: [PATCH 082/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index d0bb1d0..5b718a1 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -147,7 +147,7 @@ err -> { }); ``` -Let’s break this down. Remember we said that the argument to ```subscribe()``` determines how the ```Subscriber``` handles ```onNext```? There are two additional signals which Reactor uses to propagate events along the pipeline (and others we won't touch on here!): ```onComplete```, and ```onError```. Both signals denote completion of the Stream; only ```onComplete``` represents successful completion. The ```onError``` signal is associated with an ```Exception``` instance related to an error; the ```onComplete``` signal has no associated data. +Let’s break this down. Remember we said that the argument to ```subscribe()``` determines how the ```Subscriber``` handles ```onNext```? I will mention two additional signals which Reactor uses to propagate status information along the pipeline: ```onComplete```, and ```onError```. 
Both signals denote completion of the Stream; only ```onComplete``` represents successful completion. The ```onError``` signal is associated with an ```Exception``` instance related to an error; the ```onComplete``` signal has no associated data. As it turns out, we can supply additional code to ```subscribe()``` in the form of Java 8 lambdas and handle ```onComplete``` and ```onError``` as well as ```onNext```! Picking apart the code snippet above, From 686be9c2e90bc57681a2563f57a8e1002bd11db5 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 06:03:24 -0700 Subject: [PATCH 083/110] Update reactor-pattern-guide.md --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index 5b718a1..a9a7e02 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -151,7 +151,7 @@ Let’s break this down. Remember we said that the argument to ```subscribe()``` As it turns out, we can supply additional code to ```subscribe()``` in the form of Java 8 lambdas and handle ```onComplete``` and ```onError``` as well as ```onNext```! Picking apart the code snippet above, -* ```strIn -> {...}``` defines a lambda for handling ```onNext```, where ```strIn``` is the associated data item for each signal (the name ```strIn``` is my choice, any variable name will do). +* ```strIn -> {...}``` defines a lambda for handling ```onNext```, where ```strIn``` represents the data item associated with each incoming ```onNext``` signal (the name ```strIn``` is my choice, any variable name will do). * ```err -> {...}``` defines a lambda for handling ```onError```, where ```err``` is the ```Exception```. * ```() -> {...}``` defines a lambda for handling ```onComplete```, and notice there is no data associated (empty parentheses). The ```Publisher``` will issue ```onComplete``` when it has exhausted all events that it was created to issue. 
From 00f31f2e14fae55effe9af157c8f2748a76f6a06 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 27 Mar 2020 16:26:18 -0700 Subject: [PATCH 084/110] Minor spacing change --- .../examples/crudquickstart/async/SampleCRUDQuickstartAsync.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java index 379ee18..eead774 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java @@ -357,3 +357,4 @@ private void shutdown() { logger.info("Done."); } } + From 874431058d888e0f716182a911def5082597d7ab Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Sun, 29 Mar 2020 13:44:45 -0700 Subject: [PATCH 085/110] MigrationGuide: version comparison table --- migration-guide.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 migration-guide.md diff --git a/migration-guide.md b/migration-guide.md new file mode 100644 index 0000000..bedec85 --- /dev/null +++ b/migration-guide.md @@ -0,0 +1,28 @@ +# Migration guide + +The purpose of this guide is to help easily upgrade to Azure Cosmos DB Java SDK 4.0 for Core (SQL) API ("Java SDK 4.0" from here on out.) 
The audience for this guide is current users of + +* "Legacy" Sync Java SDK 2.x.x +* Async Java SDK 2.x.x +* Java SDK 3.x.x + +## Background + +| Java SDK | Release Date | Bundled APIs | Maven Jar | API Reference | Release Notes | +|-------------------------|--------------|--------------|-----------------------------------------|-----------------------------------------------------------|------------------------------------------------------------------------------------------| +| Async 2.x.x | June 2018 | Async | com.microsoft.azure::azure-cosmosdb | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-async-java) | +| "Legacy" Sync 2.x.x | Sept 2018 | Sync | com.microsoft.azure::azure-documentdb | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-java) | +| 3.x.x | July 2019 | Async/Sync | com.microsoft.azure::azure-cosmos | [API](https://azure.github.io/azure-cosmosdb-java/3.0.0/) | - | +| 4.0 | April 2020 | Async/Sync | com.azure::azure-cosmos | - | - | + +## Breaking API changes + +## Code snippet comparisons + +### Naming conventions + +### Create database + +### Create container + +### \ No newline at end of file From 6506ae29766b42cfa736f9ada88c5498e3eb0c67 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Sun, 29 Mar 2020 14:54:04 -0700 Subject: [PATCH 086/110] State of migration guide --- media/java_sdk_naming_conventions.JPG | Bin 0 -> 105587 bytes migration-guide.md | 52 ++++++++++++++++++++------ 2 files changed, 41 insertions(+), 11 deletions(-) create mode 100644 media/java_sdk_naming_conventions.JPG diff --git a/media/java_sdk_naming_conventions.JPG b/media/java_sdk_naming_conventions.JPG new file mode 100644 index 0000000000000000000000000000000000000000..62618cf74d0026cf68db7548208fffe92ee04889 GIT binary patch literal 105587 
zcmeFa2V7Ixwm%-42uSaWMyXO1K@gM@8%;!{OU(!h0wN+UNJta~lokO21tp_^hzJp> z(jtT+pdh^oAs|f%B$RL+HSu+TXR-cdvci z8r&L(2%RxDGlsCSu|a+SzaU#Ah^!I9+Y|@Gmy->o2dZ5y&YB zI~&{g@4vk`xH!JQwsCTDaB*+r=Kg+c=i%eozMXeFH#aXoFE8H?@Wss|AjrQ%;QQa- zg?zvK`&Hogj_usrzn}5@UR&=W!rR#%a4K@J$wJtL**JvRwi+Q&u&rERkG^;6Z!b1> z4o(Dx<*?EHa5Taiw(l@*KYm2XTR!~FxW44 za11!PzxRucJrq1RggLqPX>1cYVa0thKvY)q_I9z8NzaPwdE^dTQ^ehGbn!~aYmpSF z-@Epgp8eN47Wv=m+21?%AN?AJ?BHMn2aiJ-0)sHdfDyVlqJYJ#um#~_0RxoK`eQLM zt9VfaVFH(6*b-dWsU;yCk+Eb|lkySao_699`3rw>`GuFd(sFZK5Fj#-_F@b2j7~#u zLF%V*TadOk*mwqd`JK|eEy%(vhRGJB+b;y4_YK1O0X@Ns9nNIMEDdczwjkUY_$`R) z{w>Ho6||54CKT_r1!3&*0e`&4&27g1dHx@&pSKL>39=GtJzAST(5+94EP1N?K-TgW zD@Zb*kMc3zydMH`5RPNKVlpY861VBb9LTZtM)r6ory_x3q9VF^xoi`B|qyk zijA7rFoNHGg{4bLEF6D&Y|OVEFEliNq-W0lb3@jpx3z8W?pp~-c&f4Et%3io0eXo32VFa?VkkDWL?E1QHan1!PQdR$sJ9GX!v@gI zt8oIpA;YgFO8oezOeuP=>iW68_m1C5^ijVHEhQ4r!+W+ME@c$%Ms*HgVxABvzq7^= zI^Y+q)17mJBtp6vqH$O%2V%&xOoRrFb8OW9*8I+9?agQA<7ZJ!>=uL)R3Ktd;44ZmUWu=-#-(B2n{a)3Z8 z^gUWg((-+kKR*!JW9NKzb?k>E+RWH3*c0e)V$*Lxkm@Gt`7PL^{~w(%1vtDcXV3O2P9gsIV zkAhISwjj}KJPW(OmSs$IOq++fDF&NdX>V>amrdT@cIUlnFKqlZeEA`9*qPOai-oN& z1sg#km|GCe9xz{N)dzFZ%MKGTR*G*K6XvkO@!KDqXEPG}?E`)fP8BD{qe&DzB?nJ( z1pHazUJNBPWH4|=4IKqDUhxO^lG3rm{1Hq!Dq%a=y03q%9*oeT3yHVOeeztfkImz~bcJS` zb*|0T$EW1pTt9*qhlFu6gq$b0Ac=TSD4@erbD1(gD$1t@Hm4b?)>icBAI?h`8Pngq zdq~nf_M_bWOWsmZr1QykH|A5QqAWft-51RBVRiDrE?eenw3m8J)(*g7GQ#evM^%^l z%FE$KQ!l?(bWq(TtMV0@q|9f@LvuG~#70i$ci(j#etk01P%o7Zc^_R5<*WMU7 zYj&~!b*Wxv>;0e$?=!R!X+uQlp;$-0i=k{AaJskpd(3@&3Gpt&h@wNnwWu2Sn;{O= z?L|~H>v#uW3`EPHu;1>a6MS%4t%xt|s-5XFg$Dxd3Vn|URI!cWI6z&p5jvzg;Sj@+ ze1Qlt&~&wVKU|c_^-*Ut`QED!vHIJ*q9h20OwxOhnwIk0%t zDUy?t1T>(NO^XEv#xDf70Bh9gne)^=PeU*A6%`odXl|Ss=U9)v;cT5Di`6*Xd|u?l z5Byt@VL=w(Q;@mgXT~F;zEtOlrG-4o3<1V{P@Qx;+s`Q7;qIA1)0|*@(!_)DTiuQ? 
zI}G9Q!7a#byu>Pgm}4A!hY|K{gE&}sq)V-gE~s^p)fvogDKW0w(JHCU2xoXy1A5d@ z8pzVzS0~_xIb4~!HOK6iCIsVQqY5$<;kUWT8TM|MIy9rWTVh!1V7}N4{%Igf| zx`wk6`>^w?8b8h?)2>qiv=lWf-3R)aw7PL9d3Fopg&Ddt-og;dq?HYzFT+I5CIn+x z`ji*>`7TDx0)+|hP~o{|^}T9s2QTU!_cV{Z)E*+m4g>aM88R9aJ(5xr_8yi4@2x(R zJt6Qe_sZrI+OZ!pO^W-ph$1H|h24w~jtQz6n%QGMs@CtYv`m1FKZd^5!PM}87(oEL z6@xU#)bH631X9u`bncWL9>??hJhy)re#Lt8@&j(R(Y+O^vMV1NW5QYoud~fUDaT>J zKFI};&)|{(ueNst+9xa{gHU@Wx=-khggG4wk!#r} zQ!PQk0kVP&89z!3i>C!_^Lh_U$PIZ6$ezU9d*Cx1{mk3%`LGk(Ybcvrs#gi_6_$GoFQt-;)PGB520B+Br4yyWy3p#xJe~4&!n>f$Jv?{!;e)) z4?ioErl>8Pl<(Fr5II7ciVowkzGt}_0X8!nRE{&knZ{&w`%7Ih)+unO3NlKTwFdd7Q;P{J(vUp6O3~@o$b`o{oqWg6>^uF76YW%T^JN{2Y zq^~8{pY7+OtIdjA>N;{BL^v^#pv7HBkLPdZF~oM!&fzI&Ow=k&+K{hRAZp@vxM+(3 z1Affe^@G%=sj3U zHso{kZ+c4ZfBq)9B=@dgP$S>I((Ju1b!H!0rYsr4^zkjoEkk)wx6Kl)1Iz?6(j9*rd>RNTe$wkB@;&RVa0?DWC^d8Z zksM*CSIqP3@?vUG{KWQ2qsk1&?V4u(hHDqAhCL(VrZiwK05^+I$A1E3_EcLM;X^(v zq`GI>gA9Yuna-8eoT{Q(d84xON)H9x1O1o9N3E2e4DC0xs>U?pMr>$EAPVftU_ahR zedJXAO`jn^on-Xs#bc6zLD1S^L`FlSVChG*^G>FP<)gaSTST|pCP_g!Hibc}a${Tp z-qB4^jfULREl6`Aw4rV{wIZGjE8mt*qMK(rhwoCZ3%5%;sF-%*@M*g**EY<_?^Oa< z&&rSM2vxzPUR73z-^q*Fc}w#H2d+wHzo5p)4ryf&g*3L6vGde z4=_8)N1ZF#_D$#sd1iBWp3_R%%Z(yac|GKe4nE2?N3lkoMHHI5FrD zN}?x@FTvXJn)}^JpAx@>OWw}B=RRc4!;|q8f0KA^bwB~i5W`GD7u2a7tqH-O&cQ_y zau>qU%4=^QC{I=2f9;-V2d~~W;-YANM0F7d=z3fL2+zP7p*n&kgywuQ;;fbc#Fo7$vQ$`xRd3020bnXws}nz;L8OWk~Xw^)O3cJR};3Rb0M5!Lx72{VWJO}W+~&T zoF9{_Ti%>|fGu-25lZ-KddE!e@CO;SOAzM{^my4OvKv%+JNnJXxE+PNc5A6}DqMJh z(>o$a-y0dhCO+u|CEJ8;K|p}358i@!TM99(0sqM*VT96Te|G3AM5xv0tKHt}In}kk zv&n+mldLR3pzDz^pH3;DpuS=YBFL~PLImXW)f`29 z?3rCR3%e}yM&q*e_1QrM;}hbWWIBp9%n}DjCydpabqFY)~nAsmS+eBD)iFdwl6 zIZ*o=O*Y$S*J;G{h>|n#mBYg2La(WTM$gG z=J2AQVYpr9?fW}NCvN1h^9dc_dwY`&yyP`_D{~!&gdi=Hg1?nMA&(b8lXg+%QTymV zv-sim8}`E!Y|?RLAwL-Y3pA{G`^d^A2pU}dC?hU(Qn^n8IQo88*A_&oXfX%KrP~0k zmBw|N^}YnHT1~z}G4Ic>M9#3jk=lw-rWu@a`vaMt4$EMm%~cKF*AD5#oi^>F;Y(XAtLxCekE? 
z8*{?S7fr2=ZkjNJ)mgkoRA&}%`ZMG?Aaw%D3ph03x%W|bRGAP+{=PRd`tx?2ZFVdg zok2WODOG-yWb#N6zTwqbPoy}iv(FPLa2TMv8@o_VS@b=iLm)!5_$_UylJ}=dMPBbL z{jAu2gC5E$%4d2;H9oEKs!np{B7XcfoMKDIfj`Zm@mK+*H_1F2NT~(A-+kEty5x%cIiVW)I|`x zQ*8T!wjjbzE~BT8B;T&Ew^~+2_a1O&G_qpD;s$<0gr) z@nOu!DVh&}r>bzKyvNJvA7-XZY~+^cG4`+D$VqV)?HKZV;CS}q(d+4^+3}ybS67UT zcFYIxw(e5RzSY)tLu$|6ooTmt_dox)K4AAiQ;=lj>d?}*Aa4sMFigEsD-(uz9LqYB zZjK(ZqnfauF24Lx5Rd=O)njn+_&48&?0+5$@jfc1(ZmJ3R=-CSE=5Nf^7ZN)P4G4K zthy?yA6uNt_7YY+-qFW}kq_E-uh}{|KG^@#*_`J^G@Sq}DI2;l&M%f&y!&k zXAlLOuWUhL3K$?}Jdg1PC+`T6b=WPBw&(H|*Q6e#A+6DoZZN7r7A6Py*1uxiE5BSgE}`Rd=~`v`X~^7{-gbqLvN#n{Gjs_2 ztM((3Tc6x|_Gkd>lN1P@bs*mU8f#4$2Jmq~rkI{3Lt<3w%KVVS*01^KlcJT*v z@1cwTwN^y^6z=^qyf7h|s?K16n*;BmoBQqYz`@!r$fP!EoyBWKv9zERVGAuL3oduB zInI=oVHYG3X{MaUb}gw%r52Ls*+!eM2M8*B6gB&tg&$4d;P=#ZC^Kcwu3tr$7{i8e zS!mKXFf>HZK665kmxF<3JeX_>Isjm1VCI%jsR%CUd8kcdBs; zc&%?+(DP7qUVdOY;mX8pUr4Tky7A*$BQXc%&tg_aknaat2u-n9-#Ft>KJb10z6rV+ zTJ;?BkF3IRC>mW4E-+5rumtJfAv*xG1<4sEqb>WsmmUA+C%q6WBQZ!Hofw7iXNXm> z&U#FkFhPEn_T1r}`WQo%fF94qFMB{mU<;BU9Vk+(md<3w03f-SD~CC>1qsOqS0?2s zb+P+#^K#VV&tvxseHZxGBZ3=T;2LJ8AYpV5Mo=%~@Ps+py}6;~WTuUoR;zoWXOdFI zJ!MVf$!m%p>wyoB7Rj_jO*o>nR4NGKAwd_fui)&pycavM`-_&E@f>TX0rCW%8)0cS zpl`e>fD60GX`hA(4VFLOK~;U*SbIHN9-`?BYr?YY2aKy0=$3VE18_!}){mTrS|W13 zIBS22xs?=i+!gbK=9OrV80>**oI((;u$GPVtzfP^M%l;@F+&2Ex}TeEw?T0q1e0#L@!}7R7ZGc%0f&aIRMmd8YfIT-t^$ zd$aid9k;f*kh!Jzl;sT^V!U$s*6C+oclVX{!M>~*C8OXmEr4SZAH4~q#oVhAogi-` zT(>O!cFdsZ^-+Uu8$vs@} zDSwmvZ|3ri`NjfYx6$@zI3x0f`Te+9dmqcRxfi&h5sxV+ zRkCF?o?^ZW3PlZ#=~HqtqANTo?e8_+*Ds`*rcIpuEPsFdgS_;bYigfLc<+DUN~?1O z$hioUlR$OR5JF(xuD3?XE8y$g`pq~w7gv>6F%KTvy1pj0F@9W7O?oo$>~`my%C@oQ zs1Ymcn^#i$6zOG!)iQ(eRb|@)d%Ik5&i>0QM|Y}z!`~;q!QUmL$Dd~H2D(VQo!x5i zo)s1$W1Cm%Iyt0w4*M(HD+D)nysv)zHsMJYKS-z5A49uGaM0dVQL_prciY%^A(h6N z23}dy4W<(h+D0u+0ffmz1))$UpnwutI*#*j@4c?e=^po z)zvq*qP8<*e^EOD&w+HL8Z0d#CC7=$+>?EGph6n-gT&9rYjZEuMdE+H5SCd$X(g;#6=)QN@72TZ_y~ z+k{f0KkU?=*ts%vWSu4jtV1d)D_G(|WVDzRzYEvPq$eCi6c$dK?XVF2&?fF;$}6t4 
z@3zz@r(V1l20EK1>341Aechq5E)PtH??$QbBeI>w(`M^Tjl{qww#;z8EeK44HtIjp zYlw5$fPV>Cb?EuN?tdRN7;i`KJ0yXcI#s$icZ(N+S{Szl$mA|(#(bO~Q;|&wp)GLEBLMOfoh1&*q zzouz`r1m2&F{|T$Ic~-*IB!9etj00)?KoCs-_+VSXt5XLsYxVkK^DKs=dkTEYtDwD zzI^ODz8Uk1(W(ACR<#_)dnk=$BxWb_lNb5>Z`)1JOv93wBSRy`m)}*TUcAr75njw@ zqJP5_SBC_vAviw+JoKa9W6*Ba0fs$wTgt?zn;a^9@}>*R0=o)pa?guaAQPXxvE0oa zm+4h`weEbB^Sg62tGJYq(V-v{l%RUSed~v64@3QO%0>cLSg~t@@z|2f>ro89i5be- zWB5dUZc=smJw&GE+weG{+ajl?ErqV!whP`7%0`3;sm4=c2!bMv6K{I2l|lvPGp33# zQW-2?KAvkHZ%;}^rzV-k3nktdq!-TmzD>LslW#uU>@)12iz{0y8Su*QR5Q5HnN)~w zuL$Igoqzp(Yt`#`jc*-_=Mt28Hr911eptZ|`4F6v1Y-<&=o^+7I!N=EH`es;*+n(Er|qNYv%#NC3#EI9X4 zlPhhm`oT{X1nur}% zJ?;0kQlkJiTAI>!-1%tI8*cmYj1_DGk#c5U@GiVekN_7XOsJA=-lK(aQLi07$T`ou zK6C5|*>TnJf~KxZa2w;Dzp7yKJ3ssGX(zK66W!?t-%f=ec^%yQA>h12*TU0`s=9#% zB-|(*#}GS3bN##pxnw!K1K~gzs48PU8FsCuq*x^%Rpj4yoe*>-)}*zieXK^M4#Sa< z7f&FFn~;*L^lOZGJ{PEH>o|cE#5MXK$`#$7PIhi+VPc@PL-SzapsG1e19y%(!^kip zVCYidGA~m+cOWyKr7cK>F=b6Gppkib-!K~BwCO$?8N2^S_J253S2<{wAQl-T^d?~o zbEMkvragP9iYb$V5*NPs9E(UT`&4z}t16r5!z+%1;)thjChwb^p{~R)@uk_=1?ZH7 z*gMRj-yP^+2(TAvUK2~pcVU2QEVy~p&N|A>{cEM+BaT1xzP^^#7VfS|{TOeR68|-t zGr;v=EnOFRno{?8_jKMkOnRfC%K{=s*0!k^pLnQ5 zs9t4#!V2Td7GA?hY}AU8vi*CeyNPsx`HrfnBa6veL+M6ikAK*{E4|k`O;6y*Je625 zqxfRG_gouUGW_>lHsfvcWw7nucT?^k>a}_+`AVBHSRpgI^-)z z*dyMj?^j58F+1!>D}4Fol9BJ_YwM!zw^Pc5B=p%4;#N&oPh7IKUu=@UnP*#RWqj#; zUIp8Ogi^g4O1K&FW0F+K7{eZ{65Zx(M(3v%*E;tiG;c%k+f3`4*s(EXrOL_x87t%5 zEMp}$TLClqaW*?k;d9q%54>Bd#PT~dzb2P}#(&aK8(qH}sS3Isj?r=op^y%Oc`h;bB7Pt1wAnHK-{6&??y~G0ss{>!F4P)|b6dQXAc}1K$ z{VcIpaGyUZv3Q~=e3REICNR86T_AVDTR(slouOJ_U?JSkBb(BWY@s0>Kf~JGj{Lx< z?s5x>8|P+-;!jc67Ye9xO=F661Fw;Gl#U4dZawp(<8(QT%U2X-k9URxP z*H=N}NVwz${~6&;I`i*nbM##UKug zScWJZOb196%*Ys4{7r@$ZWIP6wlE&G5d7&}f=sEk4gXm5q87eo^YB8Y4s#k!Vp=U~ zSs9^rqT+w@HUGz%3--U9xf}vB7syWst_(JQR}NN2yoX*m{|?;7+pYo>)incP@ikXa z=D1G7ZXKfvz>74pB<8)-w2NGGiiTA*t~lwWkjiN^YWv{KeOnDqk{K>-V&TYP(e!q{ zyLlZmm#geo2GAeZ-a0*0cDHltp0XP7T$*||2QOw}s5^<181lX%giT|XD8(`gP1;wp 
zv*O-bxO#c(m5Y_tNoTjadNkz3cU)>4f5A3g3)ael?AzUS7Ei*Y-hAi140ID8Sw61a zUu3Ra!<()YJ)wm0Y54eR?(^HE2ZL+4XxJqvu%A^%*R-6NkMJPS43B+gc16J1kylr&+V+ooSbX)5)R!V|AHs{j)=>7p@L)%V<5H zUhGYMx*qD&|9MlAb{8;_WQdu7wO(+{Yf|*3-e9Rh?LiM&)BbIi$H-E*Q>JQjx)PtX zcBm}fXYECv23o=P=ux*}DbwqO&0q{+X-9$x{e1sk-vVcKL({x8(`;jFuY>t}q@tRt zXUb7-%=_r^*E-;WyI_4Z#joW~dXqh6tUdU$MqgdCpTu!Kql(s)>%`iUr1cL)95g2o z*&~5Wnm0h5@ZYujSf?9&r@7Wtb2m-aZZBG_ngmG znH?-0RT--fqWFE4J^Zy=Ri_7Ug6y`ZL@BSVZ{*UjXi*RQ-)`;&x6*$oYAH*-jCJUUI~Pk7n4#y zNK4PTfmKaYx(mFG`WA+Edb$NIk}R@KKl$!nBq z8o9gXC}n;!{fBH+068L znR|h#mxExX0#^tS<|h>YO@hT!~S#Z~}n*}wzG2~R!a8qw! zlR@!2`p(p3*a15!xx2P($9GwB^iEe5KUVa9bSkS5>a{R;M%bnGomoLph55*0YB zIu*b1?nTaMhM5wosvLzu^&iv1mzpfhQSa2_cPyq`P(v{J;1+XQvwE7JvqsJf&Cvj@ zU3QiyJ(gRC`B7TOq`nleOUVeC>>cRY!viWj%{PmUS;{b?`dW6K974QY=&dpI?@ZM{ zscj3_^CDW`zO#Y69DS&{CXV(PsLo}C0YgQIYBS15lxW3(f~4kL*C;bzF6BlS^7SOgzbh)rz4XMb0%hqNtb4d7c!#^)MfI>Fg+;G6wMRZd zFQe;a^-uUxM`&R{A;rYLvf-OVGW^1|^n>^HQD<|-A|Z)JQS^lFuNeDzCyRlF6QC=; zRo&zNEX9Hv+GKFfJp3MXo2m7w;@(8U!kZs9wZ_(#;S1q(OE~KTcDL4&^pGQ1b!Otf zqkrIL81{qY=MQ(qG_OWfUQgU^( z#Ak>5S!d>;x!B!)QvlmIkqz*wM@ty)W64Wkzj$ zi5vF-X+)L0g$xCa>LQ?o19aRd)73BGLvTVtXZBcAu=EwOlCY#kF8p7`R}e$cL4OuR_0H`Un~auob+q|wCwc!!&xY8%Yu z?|u!7vL2INWb?}%y(KRqxOY=(QLM2+*FT>$pymE9GhnLj$$+Kv9M$N{Om=2>N&)$s zq5Wt@ZIxr;fuc{l74yJ7NrK>-)Pf3nb#ZN$NU??uqnqGw1a4D0>*0J{nCR8ZZ<0RZ zJz^(2FPzzXFYI>AQ8jYHg3IprGccJitS{sMI_C&O5cJ=!CMiMJ@a5>zy7f(G7xJ<` zj#fVNk^CeCXHSu2NGP77^q71`$srF=W*Xz3a{gF~udOuwA_kFb-XXHCx!F&Dth{t|BQ^&csx-B-i{96b^kkORc#Y!#<<2`S z$vM9l!@xOUB$uJs<>Wk2d9rEv)QU1Z{o0oxy$UZ55+FO1( zy2;0t(|n~ESewhW8LZmm=YB0+nAD`ygtD^2Id9b$*-ENSs&#Qit!r9YVJD8vwZ53z zRjnPAnz=#y(hrV29%0?tK&?(Fb7>s}+dHz-UhcQHPn}e(@Rkuw znLbs@{^d6ODT|Go&_clPo~_$knkZdVst0Iq_jn|_t432TR*Id++SZFZH=>xEA>mKx zzX?8vMqUWXPKieyTyEpP?4M0Pu7vw|fb(yTx+x$8ey?#k!s*VF32=B6B)L~c4fF8H z6W(3Zv@$VwGNN=ic_Y{WO)~-Pa>2Z^bf!sth+tdo=g{D$)zw^>6OlXcFvoFH#r)zblSD+p<0&5aB?k8yR4==AGR_S z5w?LJwG zY0hp=%hjID^VOCdEezi4)+Url<7 zvjTx27INt5X$LNp`*Ha@w*(HIGscsN6rvgPM+TaH@SD?AjiJ}QGTrfB;qzgk=gnm# 
z1^GgU&uAthL@KO(!N_me24q5q@$BLB790oTaL-kJeSM8rNbR&;nSN&sj=M_d#p?FS3zwb|<(IlD z!ooT?P!f-&zj>;A!?xDV+tpPuBQE471LrTR;-woF*nDF;;o0S)Zv(4_Aj=vVgRVEN zQ$l@&msJBE4aA`gboA;jRrfR=x|YU&<) zIO4p*z8D{R_B2jLfS_160i`(2M|f-1+Q0z~l7WA-)7d?Cb>-_mg&d(81F|kKjvVEn4+U97oZLfb2dCbM({tSa;rw{Gs-bQ5o%|Ka5s z@$0XiWQ#p{m_kc{yBadXH}=>E+|P4+^-$%VQTalTOa91aPx0pMJQTR=5=`VGuKbix`YP?(VNb(zy4~iKOZ${Vq8*&lBU8mdi`Ki3$ zc7^npx@H)b_TQ?xkQ!RBO{H+^b-QfQ@hym(t~3r@X1j(PF{aaofTwxBJzx`fRnjuO{Fs(Uw%xsP@Yr2Nei*>Su{BFxQI7LDZ0dJ z%FNu=X~f(+G{W*TY=l67b1_a)Fr;dRCsm^4* z$YtO(;!CG}0K&(aTs2D27&J*-ty8zxVqiq7F^N)VeHE&#+a4@K?RCc3}Dsjv+oIRUF+YR{x)w7#6D;tKM_OM++T&Z%ej zAB81PcFFCS8L~hT7$=!e^dkc3u8adzlc*(q^*n%^#Co)ODI;j$a7(aSpmO35FN?J< zpZ30XUIMD+lGF@S2EEL%PY=x)7=b1nM5;q$UzEMg80+3vLzga3JFEN%mab$7fAnai z`&6e#6T=J04yTq(wA`z4e3_8$wzpTY4za!@#O-c>=(?ux_3Xr0)o~z3wq^R!>N5pi zt2<_;xuFLXa(nDu;U6~7mS_@4hn5}(TD0mZ`6|5XAKirdq55v>#Na&y0ab@+1{!yv zRq4*!3~M;Oy+TAyw8r{`+HLgx?99vcEuU86*#oc_n8h_#lbvAgcuS+86!H-8l!V@c z)T=BNaRXPA-4j(mu}h_TCjFQtTXgw6$HOPv_?}!Ehh{R&LHi5JY#Q!t81+35gkKDs zP>)fx%h>cg|MpwHLFVumrSz{D)sJd#BWCK4AMz25hf?gf*H}@!8r=ja>J7LZjH4CQ zpc^L&TpZhywcE}_DeRMr6s~~EjJG42IA%!asiba%^Q0;86g)Sv?sMVPXf6P4(Anv7<@F9!xV}iodx4mP#n{bB z7jA}d-UKR}LZs;fsYSj;1>j;9-De&E>HhlS~ z^@SO_`8-rqn_nDh`EovBjPE5Eaj-A*(Ws< zv}bg}UFU*IkK{kH&f6)To1J;iEvbik6N^N(;L8UR43B`W__(1Q(y$NzcA(+dj$?O^ zMZ`<^L=zX%@3WV0L28=1I>sA|jt@&dp>9En)1vZN9e7@}k02EAH!EQgDMhuf=Qgh- zALtr!c2Z8;-w>&>XQrL2`Lu5*2UMQ*iC7Vi>cp0%#~JQNH>ha)17{Lg*`V z>c_CybwPj2P1^+S)6X+Biz)a7|DaK)-7p(gPc&7{dU#ehVNw{YdNmvZk{#O?Zm&3i zR>=7pkOar2oz_N~8Q!Pqoh6~DFv9Nakpq{?_LB~MOEq)duUJ}Ll1g)O z9onPs)3wR)ameo!_nh)7tnw||^g(4IRRBJ!D1lgB(dfTxzA8PsNMCT$eEVy^Rp6j! 
z+Ty-z;s@GQPrl6du2g&0`AMVs=0H1)au#Hv4$uwI_11>_5g_=z>nVy7n?W(W?)&@|TREy$emvCe5`W+Xf3$eXVoN%0P??g&e5!pWjN zm?aPE3@3$Mx@EpE*1Ewu!7#E-$MxXLyd~={@?#aDmJ#Wwm=YzjFkOp9fJ>p|fa>FY z`W6$N_mAbSk;|sy)4E@lJzg~LJ>ss8$m{8G%$#6#xdoVasj~+ir(E;tS!l#s zr{&GnfG@kPb;V`_Z3~r8J#H;l>!%3&3MmrxpfgIH)H@w5g8LCuLObDYkc4Zfw zraKN$U%xsd`rs(!4za%&_r#_xzow&tU=)ZqrFT+JteLsvhq7y!eJh_CkS$1*%>-ss zVhzk+AnD*^8?ssAma(u|XE6Gwu)|r3crd*nl^{Vr$Z%7g#Q{>vO(2BxXHIl(taqU) zdn{Q8>wH;CEqCp++R&hJ>;)g-C!C4aWWYW$?)?$cF#z@;r~<4F9lOEJIF20INGGwp zzM3#puwv-X6*!Ph<-{{App;igt1ZaMF)+rsB?9gO(B547RXZ`jRiXsCvkr{WZ!M## zLFmSf>lnr+_RALJ18jn0n)nefxB`=fk!*o%R#Yt*!9WzYAQNC@`YRpb2GGA2Meqbl zSP`pm26xI9Brcn!)P5H|)61G+f#x9uJ;J&SpdW95-Qdk@g03Ez2e-Aesc%6>Ib|CG#5}DYJM9x^ncp+-!1*Gs`y!Bqd!r-81z{@AP7dPk|qqx^zB=a&U|Jk;|^=Du^*!ybWMks z<$g6?{$U)LpAor zGbivMV8Q+ZSigdl_}>dDKbHhT3Unj%Z%Ftn!GB`a{~A{PH|NXGpzpso-)j%kbO7H8 z0~jrf;?gs_v-eQT%lt6;;#521{%>c-9p)UFB{hy1;3r>MR>McX0Ax0zREtY0@0bdG zIqIPC{@eQ>R@*ZwBXlcmb}QV;I}@6}Sh#1&=rb0GJembvUrtLJ$g{9)QU4tn{~sXy z3yA*)PF7uG!=<-bS(c1g&{d0O)Q0`(PycuP>F>AEUpvsh+2|jH>2T1!{aw^wk@zRP z{7pf4auP{a(p zvM;z+r)ju3FXWU(Sh#;cK)_L6F^Qyl&xTt^4Itv1A7NLQ#BYP8hMP)%4J<#Se}SfN z!FXh0d=c%vpzx_|VadM|Ho-N`sHxdR;(teM>F_;r{o4(|z~vW_E6C0L@m}{AYW+pj z@VmY4FGL;jiaY#WS$~Vddw$jS-yN-G{Q?=l;P02#W&SV5usARrw)`m^{#E4pix~E{ z^uHFv{(6%95hMCv^$WjyM1LcW`cLQF?~0@TF6kc=HqogD*vhw`5;Vd8!36D}MN>B& z5?DKdM9U#_(t_%bzJ6LaSv8$f#ShBSIGC>eLiit(pkx20g8#ray(To6pZ_i5`USN8ucYU;bKOYm zt{U+GQh_~i+m|%b>=BTAiq`n{Nx7z*@Un)k^oIol)vVuUF_yj)B0nF2zYIy-Uxy_4 zDJ&1peR5Gu{#`}aJRq*lz7krq1yTPUB*{Mv!q417;m@KBOUChk1|0v8N&Yso^UFK^ z!gM%2eD~KPTU5k?^oWsyvNZq- zBNQUwB@7_fTo`*g1d(ZT=cCl}ldGV)VJI0tYCWUhP^t);@0s}sPyAjP)9Pr&l;ikEN>+QepGpWQ5tFuh;_h9qH%~*25 zeJ03{evJd}u)q@Um$-+MxD6!@;@Wf`E4kAg zu!&uQ60>O;FH*z2^5?4VDhGIndLR45-+PWjEvYK`b&k*p&PaZwCHTZQK@D*6z}FXi zg3XW9;Cf~@|A}!ZhA4sBj;_~+c|9@|LaL`sINS$!MYZdBYJRY8KVw^FEOPF|c@EKA zPN(4y8AE%a{crL7V;%&=)e$}`mA-= z&dePdxz_*IaKXSt28wzpa%!a;Z;1Eo1XLWF3k7K-l%;y;upB8bx@LDFqOJGKoWn7N zj;VX%51U>1;_e2AMxJi_!23qpt!xxr-OxghKW80ezC@`q+`uQ=!p=}yVi=B;I!4nL 
zMC8$QMMdRpg*55MY2KIolTCXUKX6@k+ol=Vp%3j`{-#|lZmYy=b$5v;U-@Lt6WeEg z-N_}xBjo%GZi@8-4`qvkv?~6`zs2Ud#!*w1goQk}6t*n!fIkjUtc6U%7Lzk?iL1s24w82rb1f zlu*(267W);dLlOfZ!kRCE73MQV^%#H%${x(#^apd`=~LtN!sH;$96Ii%BdS`=X-Hd z#jQJikXCGya^5C6-mS3741O9H9I*o~@xwz8olortYl>Q3b4JR_t}r!Wp|KNnds*F3 z0haLmp}9IW>QW>_J>Lb=<>WIwRrs*`&U=;Qs^ZQ!MFVU`sO?DUZWj1NMA|2y>_^tn zrr`a~SZzPYoZ`Nj%G$)?t_(AC(}|&eV@HnDU3CK(_f@M_0s$&`y~emWH6SJbs=f^^ z=y{%fXvn&4hJ{IJb@^~A)+_kX|3}?>hc&t8?V_kCiXuvpPE@K?m8OV9MWhJ`NH0+l zAw+tIkRVc|3n(ZEC{?5>y+?WzLPtUgNhs12kPt|5ziZ8`@64J#Gkd<-*LR(B{t8#% z;eFnx{QB>{)q53RA^kCn$<9)}`<0n=_Gv69+?JRXN;~$8>6qCUd;&XEoo`A237d7k zH}(Cr$YG78hNHx9H$W<)A*vIyxP+-|bHAA4c3%9X^WbaRY_0YmG5e0&NtD;78Qj2e z7zWlKR+W$@N~*3EB1c5SA>aH(6SE~D zn^Na#x#3doT0;-u*KoQ43jJPBO#?kL&wHO8yYeGRUpH4^(F(4kJl>5f$yns*2!^m3 zT(0DrnOYr|l?IEiA(H%SI`$1i)KvV6VfFbL*-(Bs!ewTM-fBICDbPoYy!>=kFiCYB zkxueztw*<&QS{bm`Z#LYv7@4Os-vp0m!Y$F9m0}=->am4(fusja0rUlr3F*isCtwf zJdhtd)kh8^y2Z%L`AB0U+#CupwrGjB=P^BAC%cb7PT88hz$8Jj=z%yQYWrF!dRG#lw0HRr~Ke36B>JABAM{M6b**^ychhO?CxvyX|Es;TFW{ zKy-K`(9@o!^%|qtJnWX;SmS3BVI+FDS6l6LrnHIan5?&~9G<+77r1s!J1t;C;-wbX z>5no4Ywp}Pn3D{cxO-^_D4aOdWcnOAat?WHQa1iMj{4Az)uY*LWvd$*t>rn;Kl5gy z6UgzHvBw53t%FF0tw5Uyq%CMMvRzo!e}m@m@z!xRc5P$9qQrRHo4&?(?w|Ksil)Ux z)*8eIw{!GuEmuVt82gA<#YwI#Njqi98sM`CduQG^PaVo9Os^XyL*7Gbv^rL?A&@T zCv~MMsN=AoDsVljAx)9!7YoJ-jH{pb7tkefUc8xa2OEEBcjh`v(iTh4zPWcoojJ_4 zyIZ6Qaf#8FMbwjLLJUHP+VWhq$0TN}d?2r8HBtp|!ok=J4V!26uDWvDKb8|gDy_e1 z=q|=Ki(dTNg~=k`M90<_bjnjCRlz2zq`BAcij5BnMri?QpN6vC-kZOs_`XZ!z}zI-WS8|H#P4ksL5>Rr(IX6 zs3YG5v^H^MHT3WLHxE%#S8dr_v-Tv@4C%g%h&tTvpTB4t0Gdw{O9f~iw1Gd0bO!@7J<23?Jo0+*ixPBbkvak;2>3Dq`y|Wi_r6%_aJ=nXuFqr zK1fq#R~TK!021{l;qn9r;%p)55C9bLVzM2YXA4UioUITJxi!opy8o!*dG9tvfp!iM zW;*cW6hEQ?x;3z-4SRy}X{Y3tbKq>@qZ%TbAK48UtUR1M~k4pY-@r46fX(OvSM^~nC3*75nh z2=OZ?1&WOGEb6>e6JyU3xTZgOd8u3EmX20iVLvRkfui!*VR5ncLbAP6elB7H)@D;P z!Cj-&I^m3uuBx$)J7r0V2~(W--L*Zc4u$6{(3}Bc>dB!Q)FvAYs_D#;>*F!A^joj1 zYa?)PTV8%Pb$H65#e77n4~K44j6n~602N&UJjBMw&oy;@H+EP*mvZPas>BQvfj9xgcABT_mQ6O`Li|Axci9Ht0bQ 
zv&w;SvGblEhd)35&KLT#OdY`Jg~O5b6odlBuoZQxnV%xjQ31af?XM=m^W3fUypt01 zg&0^}X?*K@?YulwqZ+Tn*L0uD5K&jUaJUz3y;+-F)0~Ce7o=PYtNdV^65<=Fa)srv zrSHxP?pZ4J!)~uD%U}?L8oefT?e=R4uG7yT5;PTh3F9JMjb5%MK`bDsMA8lkA~`Cl zQ`i$L2+oxCoM@PKlc5Z>z}R(gmP3(S<})84hAXOT}H9O(YKr?(`}=B4?9?3n>}TpNSH2AZ-&QqRM& z1Oq_O8+2D?KAd)aRaaLR@N};XhM7&n!hF<==Y9CzOZLM{ek&ZhsnmayA@6UEM@7i0 zfl2Ih)tQ*iBA0AEv5xqvhh*OjIaw7N?bEX_T#9oWg6Om46pdKrH7%o6q-8=~w zb&Eh;KJWHi_8HyvXu=y!k>k z@d!{zbNIJASU}SqrUd{{>4FS&%DA$XW2B=!S-M|$OU17)Q!dG6EB~%wD2Mta+s}zG ztgcQ(hW*4IhGA?g$*#?yaK+W@caE7rgh3 z6QtVOj=wQ4-02yK)4Oxpn}FuNa!bEQr%tn@PRywWc8wReeZua%K?$$cq&T6-&)3%l z3KP#rwRVO1dy!j#ydE};ah5bz5WN-|)+in9k?)Mj_-10&=IC+!lXl!g$9HXknifEwO`S(M3aJaieFzImP{{`U2;*Uwl6CzjaPvPf(QF2Ed; z1UtE%R*rn{|JEDH?;lE_%G*>>?XvuDe|{()DH7EW240ftkRvGLF(t537mb zY(XZJL{ez$F#Aa-?ZrObF9(M&l!^y8q+PT2pN4-tZto=2O$L$#$7j@T6YF$eWmUgc zT#(G`1$cnMtuG^vC)vXN88eJpi-2 z%CVC@G^-N%jCP3BaEpTUa3IN+%2vuETgrT^$rCTzMU{6iPN%rrIKqk^kh*54Vu~3U z0KXXSMRkW;QRIocEsW!hZp`9|9u8$NV>DZQ#^L^G^flWT(>;oDGA}-Nv36}0hfggq zQ;xR@PENcQQ-voMtOpmU&U^2TsfbBn4Ehx_<9iw)71W@U*?CagzLasl#R}h=H0!nC z$agAD%qJ##Ar)ooFp@pN8g55{eFX9s>@+Z`4`89jprG#hOH@^oUB_u%fki7loWR` z{fcR@p3U#{Q-Dt0s`zp1Z*^)tn?ieF5k*qgtY%KESiZjhVF zznJO?;8kELM_#H;yOBO3fymtQ<*WOW+0F2uf7Q=|X-x5pK9w~A`HA-NN^%=ZtRbE{ z*TX#+M*uW)A(|0M6r&Q+2vD({hMoEPw%EJh6|=Q?Rv}}kbFGc{x|Z`Ez&BtmJF$L= z3b3%y)2;6{k2H!94mXk{V#ddxMCjic7!&$XJ$uo&p+oY)vH8yFr$?Hy+Cjv-kZ1LI z5%KkqHabgdU{>VD`=${eH+HN1^g=6r&Z3;)Y$Y~{nHCN=W=?DxT~$p!>4E1rb&+dj z#!6of)i`pou;tP|F3wMnO1{Pymx1U=9}eKv7@4M-<9yLPs}h;%jPEU9)6cH$%rI z*SnK+EDt>LJIwnaz|Mi5hIB!5JA?UYF2o!Hd!)B53!dk>cQ}blMO`=v`V)O>IX4`- zo@CP-TYBr&BOTFSOx_dRh9|vzLigtb1n$LY>FWe(8kjAlN09vt>-BZ=g!Dx+9?}ac z#wTED=+`~2Wmv5}*I;#XkA3g=KN+T^&9!8z6+D<;^pl+p5N(a}!~zC4B&rHhfqa~w z=L7g-;^~j&V%a@JyXbtmvC_SdSdU9G33a*HQLt1f%>+nFxCtbLf=|*wxQxJ+)z*u! 
z@zniyBi1+b6l@y3Y;5bVIzGF9r%2FfFLsOl-N}s0M3i#8YJgXUT%fyG)~1M)WMyvd zW=>4t&%nv@Gr^JGuVH#A`eHfb`$d6SRtJL^_uUV}Rpzp$`pD155$cqJIa737!oeB1 z#o(~)E%{T{vo_WgGug|xnQe9Bt>!;F#xXnTJaJ@xADg_4`m(#8j7JIi@8VG*2sx;* z5K#XVr+zx!y5!dG?SS~?kCryKwzbr5J?#!TFVymejvI_%>Q}ooaFI6vFDB(>WR&s2 zNY~&UCe~T90AaSkJ+xBE%17qU9W}q)0J}fDXzl{3*V%*C{cOGp%44~@4B??E(t&&c zlpQnm)f{q)!ASw*TvO-x3f_7j2cOXoqF3OLK1zJqdqU>(RHS6fp^c@&sR#vUFwrb1 zD2pOdPJ1u{ml~pEQ6GU>ZJc;^#U;H<9_F1^HB`r@aGemHz9D9Lf9;WnX`?h+pzxzs)S^*nUV46<-1vvB9LucUTo>0QFHVrQzAr~F`31L#4TZ~9NW=HyXtL4=2An` z+H@Xua56aJZpeOfC+}dMaNz3;Rf@^Zmuk}A z<($b#8;jvf@8vZ$XFswe;Zz7klo9o_K3H_;@U2oN22fvW))RR;3@H-6$s|1s>!O&= zlRtRm@siv+^(wxn>!(ZGy8WCiTQfb*?@y>u&ohRQl`QLQxfGSIiCIC4F7^cN@ontG zVIP5tQm65)3zMSdI@GAQM+FF)clf@}pHkcN_j}ty5VE{=x=nv1`Q594{IZQsYIQO! zA2XA?YuRxuNY1*($kx)XPaoofE(#$;pIU4Vq5~cv%r=M|I@&Brfpm`Z8zd79!AE-tnBhG zsx@69GdMh4%yPD0XZ@4WEF;0tWb^Dv!i}rz5H6YT3gply3eg(7t#D=Rz zUiMFKBidZiUQ9EIadha$^2?v-{qQ9Pa+h9$grT_st{m(fDhzT$@jAO5 zC7GZH-MJ)Qqxc1CC7g9q!v?*8*x6Mr|0I4)DnRg{UfW$PW+?os$t&jz&Hke?VZmtz z1&Yb#^ZF_?O1(3K;{g#1$*)J^Dxvv%)#a;xd(OdJ9++Cx3s9wF8#ZizS``xA7%+!r zb2h)h>!M`qj2Q?hcUR6WEuOr7Q5s-bW(;8k!A}e)A^3?Hg!nS@Wb+g*myztDQw!W>L z6l-E$zu2%)UQunTpgZP#C*q9oPF)sYFs+yCSI?{uW3ukT?dTsG1&AW8fp&HH5?3J= zo$;#UE^?jA4^wb*u5!mtz27n&5MWsZOA00(wi#7dq8RE>YV7Cq0K&A-ls=O$E(Rmo z0XOPvtBqG2?lU&*5yn7jcc{>ZoyqxH97UBzs7U{p{I2|QcRb+5QQ^`b}i->VC`gZnoZCg z#0VN6SDfn7Fyq&Z`EFa@+q$Zj$rZ~9w3y9($UHv*gr4GrC7X$z>F;R<6j}NQ`y%M& zZ@X4rv(_DpS~5@bgxZ)~R<5g(`W?8pC z*Bj~fFd%IlGdVfAwdpuMj%Z>p4g~EM1`12#Z2ib<^jr~oicIr0OWHMxo7 zX~=D}G@_}{^3|P`l_Rll&fYokSbKh(SF=Z~*`y2TX`mqq=xBga#}Uj;Q6{tuQFz*C z6^P>nIVCi62_ybu(u}_A6DMr1GD1&$!Dw-FK!CT*ON!`svFwde3%+P1OzLV!kB7W8 zJbyw`!GM;C?JAcn_SBvVxC+fiKXP);1-(35}^#6P{hP{p6lk|Y> ztODJRQF{r|pub@MR0HNAl|sAyNBbP(EKuJ4_Z*}^Bi7&Jmxib_0LKZignH*QMSf#O zA%T7|=~Q7DSw(x#{>g7X{+^I2l^_FI-6VND`~EH?yTKWcbVBW&|LwVD$Q9{@E2|8h zZ+`TELZBXmZ_;{bS01eA3E*=K{$j`%Yp0 zao{@nH?j~o@i8UV`b|ShM_t0kRcBC1N!>&C9DHZYovP1Y`Sv)4WuaIe=#P(7KgU3W 
zza*^H;HD^W7Z?$JYq+=sA!NkFR7u)Lm<|MbFhj&Cecq`m$;tBYEKdocZQe zo>&W;0=CpSVx1@1AFAtR;6l~u%@Rqf-xX?FAX!;E6~hq<5mJw&%!`aG7SnVr#d?#Q zGV94fP6!r6FGjwNnnjW7`r71$1E9A8aFEFQ*8tTkOg?G~zMnCA>rR^Xq-E-(+|P2{ zQ_P%M>4$*+qq#5jnLuv=^KTmv>K}t=33r*-laCi>X&fq@ITmN1rmb1raTFAA!ncB2 z;X42$d!JtIUe%5C2^v#>>6{;=rId24_mxv`>~e2Wa+;^7et(zudQd@-ck{bTbImrQ zpGifVS1(+U#0-oNL7tS2(h4am^7=ctR^ zc))_m0$%fbKa_KQ{=D*Lst=rpR6PZB%Jbkhnuw7tU@k?@xM-4pA*$qlLzKT9TvgsLEx<9647=MKDM8 z^Dg(hu-cENm5;KMpw~QitcoCoCqHAo$Yisqtu?G6y;^+*Y3ZjtjSYqYL@trZRR z%)d>hn`F{y@9tS*yxeM3BxS$`fby{V8LUvRD>|?3pO}&%MR<90Sw0U?*_z40D{+7B zKsRHZceiX%%6~G|o>(R6Sof%S!OnE-(-lo!9np99t&_|qX2s7`uyPTlEz_I!ZsneF#$B!I%;vSidPM(_N5xRry7Ib1PGA^C{v^gI zUH9UGm2<}4tFqwDVXQ(I>H!wBs!y*)Jpr);Hm>3GH5HZaaZE=1Ng20CU(>`SHpSh0 zgK|pUJz4sGF$tpPq8Jw`yvQ~0vyfK1_nD~-aT7opk1igh_@lGUARDYemL@k9mw zm#iTd8$HtK2i3k17Ir?UFK}a5ycY%`1~LwlEk(i(*4_1TLTuqhDCA2KXTge!meJB`g|&#g&#RRX4bm-)mO( z3;NEzQK5Xm@<94-sC`}hQSkL(D9{~RS`Uo)is(mHnzv1X4`Ti0s2WwNH@~D%?4@2WimwR zsXk>94AlgD>(om9k>rCGWhoqci%)z7!kQl`ZgU?Kd`2$&c7X8Hk#Tg7D!&RTcUu#} zK~DN7ja333Ft$(q%STLk2sz1MPdf|L`Q_+y$CqE@$9|Hvg}_!eb3qeeLH|OHYkeyZ zF7hfxl6Qiu90!)r#{E$5I(uKNT>Xx`ByJh!{Hf~G_EuL4znIfH&XD#>UxLwq)UaCuc;&h$*cewBjwk<$ zsp@zg!01RYCm@Q~;=2Jw@$LO@3Nt(UK+AikvMWm42{dkdveh^Wmh~Ogt}a0QYDO~4 zy53xsBXYSQ!^1`V7n95-oF(D1RML&1rf=hzZ&@t*lK#<3^8A!Ibf*kTGD{Y;b+>}DL`VQpWIB{&P zK5WAI;;LJ@f`IhpFSphrg>JozUI05GqY!5)5UL4{jh?S&J!i<_B+QoDHZdzix}N=Y zVq%sf-Y-cHx^X_rk4Z!R4U;~VT1M4JLf`E z&qk>`Mz5e%hr3^IysN`NZL7R#Q4Uu=RgOxV>30;I$#F5!S1D>9_bnf#@DM;v^c#318z6<}Kf@dIqfAZ4U|R>D?2C|I>^?TwRcijL9kq~4B*G3I#o8w&w$6iJ?T zx5>?RgbFFYP-rOu!N(Z36QXKH#Hq&>V81)>?~FpkJ{Yl9s-$dl#Y%HF^oodI8u=MZ zlzbR#j)341Jcq5>1(k(!ogU%|M?1@n;UivIm5BaMkCdpcL}{_{^#$wmg=Pvq5; z)gE|e2UsSDd7k9~Lgn!p(<_iO&X#Are=*$&11d)L%qLly+?H;#2AXGqI8v+I#w{E` z4kvEA@HhdAHp`)J)nCx7MNrk!>@qde7p@vSL(}SVhCLkjEg$KINRfRo>)0G7%uCy&O^0*M1Cq# zv6_m%b4>L1fvS)J0eT$vz1GH{Keq+=^vB}TOue|atAqE0U)4GGiH||YV+j&`l9TAy z`MGHgLsgIR^}xky`cf%bDwpf)Dfg|MS9Ys5gW1E8Pu#d|;fC~&jH`$dFfrOc+c07s 
z7ZZHEEd@JipakN!o+?ZCP&-rZ;%oL86GA}Hg6F;loO}kv`h7uFDs2H^`x#h=Z~bBw z{tZtj<8);eDzc57rst~SHDf(}N2EGYIs9Ijk4w?ZI=c2DY1$7K5Asf#20OllU+L4B z1=O?N`4yCEIJ0#}FwykE96blZJ@S0fb=*vk&z+XX+m&If)6sp^GaDggxbVoMusX>ckgI z&!{saTXbk3syWSK_$Mel&^)Tq;eB_qUNNPJdYNE2?CqodCfltxjvtz7D9h}ebKvdn z!=qqL&aOO`XXA}(ZE=?yJHw>yiE$Tcf#!x|f_U6pE%BTVI3bH(tiQ(M_#*wds^s zV=DI;(+oheeW%gsDUAYDPRb@wA`qL!>J4hHXszf6hVn$YkcdZ+I`VH|LY zKJ)&HM9|rSzWFGE(k~aFQ!9#=78tG>`Gb1p~9L4$J7r}qE~?AV&Ox4|D!vD zOCzotY6583h#$@UpE+@K1LZF3xAe1w!~EAfd4K2P6x}KI2Sb))Adf z|3JHe%YM%nW2;)+Bb+BkobCPhw+O!N958a4M>w)IcigC)|%lj^uak_6m7?dsSB@5N9+U{u)sPVn6;XRQw9!r4iI!C>3pf+`)cyJwC#WjifZy~Uet%}*~m ziDW;{%J3>7@^p+KMj^y|2|S2F%#ld^F$Nz+;VXz3{lgD3IjrGEAKfDPS>`1;druOhX+-{tv=d6T2 z^4$V!wd`dLOwQTNv-4cI$sw8C14%d3RO#n%-`^lx0+p69Q%Ia%!4(4I77@rQ6|IFR zbg+FbUGs~s{51TU-#_5mP@z^@o~CL_dW_A;y#&RFusgE0w$$;#XdAF(kFE)X%fUfF zOK+`oZDt1nFx1jyqatZMocQN1E+}G&BEzGL2JZkaPN`cPgEmKe-Z~i9o(mZ;@a6W+BEyK)G1P0w7Im6Y5n^!m*ceM>vw}Z>$k3iP8|(d6 ziJfw1u90=<`n=I|8Qm<0xR`Kxf&NqHEQC<+)uI&|ssee-I6fO^IKQzGEo{`+9xWcT zbvrZvTB18cX<#oIzOMLsJ&0f^^68a`aduLejs38;th*57) zCQK+Q_yk@`4*4N5@>!`*hJ%t>UAq3z4sTSVOXZ&HeRTqVZ0B+Iw_7@_JV1d!D3bA& zf2kv3avzKLZ6J9OBtqdvB-gE_4op>K_|8WenHXDL^l|2(0KsIIR{>8?%$;DRA3YR( zlRRSsC9x|b#*|I{3pzbB13k?a0AM?Tz0Jb_Y>}#HAEYUs zE$aur0Ij+!+w?;n5H>|X>!;!xv%YLp;frj&tVnl{6OczSR#*)jRbQ+|pl+jN zL*(?7cKWT4w^Xagx@{P_!MLyQiKv0fgqZubfwe2&Ep@A`oVj@_?Rs-xUA?+uJYT_h z{3_WP2p!1p`0Ad{JC?`WQ?)@I(x2_Y8cba)ja)nE*M8 z_>^%hQI_jSq)k*#^!SgY8`yd6(+AMgu1)oB6e+*2Ft^DD#!+ARp@L7{nS7C&<%vTc zOW)+rDd;_$)v=tWHU*!i>M(lk&e5}xF!|@r=Nm^A?^C>p1B*-6bt4(`D)kYcOk66$ zw!R^3M5QAxA4>74`2f1fA&uh3-@oW@6Yn1De7~)B>{Irzr}sQTm&4cBJ-@ip*Vwut z=jgm~kqKA2N9;(fH)drE>g%%SoDd5HIGQ~avBbM>{8Fy=WV(C^czK^6O)rPvMm|PS zF8GBYRQ;2>jFAzk%_nM!b`?dIlRe(epf@%>UR)E$`74f2cXOWgsA^&YyBT|qvE=Q5`%d`ZUtZM~g zV%Ovxx>KUZ1-HH#R>g~F-2PtG~9Nm4zf40gP!AKn0uOnkgX4+eiGt z?ZWI84V0ps(sXEX*8;@WO1fL5Hc9yNURd?$BZI+`tT(Ki%@=NWF^)$Oo2oMaapnU` z7esb_L*c=yRNi;w_dye;AP%R8#G=FDt)4Afr@88;%1g6TmTUU@g6XUDB#9*5JVU5) 
z-pm}CVTMK2>;+^cVGO^bL$DR*>msC}+w=lOS)@*~9mxgz0xpgB2r8ah4y+0v;*eqb z-uopg=Uk23JxdyQ84*?BR1K80gVD)m(OPo^se1Zx!3#DOh+#jUX zWa0hN?aZ+sLXOvh1vw$Xmws|RAmjzUjf~~~@>&uv)mf${rqe4tL*)xn;mMOT^yzyI zj@G$WS*@3uz}A}fPNeTiZt{XnUmCSx6-L(25};rJpD*%6XS%gJMcy$n{*K~{3OjSE z<#K8Ygs6j_)M)Rs5Aw;Qfk)Li@%k<$C9_ajnFk}$Iu|sVU*_LOkE671^q-}Ub*1Nx#Vmyg&9_8#jjIc~7j4XVS zaaYFX!E7{;8D0|O&8Ll;f7lp`CFtMD(cdXArwT3|4y2vB51BcGHH|Oz_M5AtDvbWnCz*=S2Gnaz2X&y>u*dGwtc!X`PMA(dFf9xbc1Ql-@oA@+iHE@=Y&L@oew z3~)P6eE7v=!Udrm!O~0vXSnxNXnKwZnh-_uw3A3$;tq&(N&&S;X3ZVVqi^hNCFvQA2elh(x1EB!oW$!G|$M+1_8T8n${FYiW zgHMBzFNq?up(vlgxL4qnM?ln^Y3dhKbeT249+uL42q@vVM9z5ud-fSQ^({q5}`QJ{W$u$~`x1z8!o;mztdWHt#@a&Wqd{`PC;t3Fe_5ArkIe$BcoqEZC z`li5+Mi~C~rl8;7H1*#YNBV7r{%+hess!MmD{z7TvL_2%$o48Ch<*?V?;Xkl3RM2G zCHw!!Ede%{W8y3F=kedS62OnSG4P9NX@#1)C#>`5eV3qJM$+#BoAj5>eDyCzY5^mG zNtpa~cJhBd1^;5;-#zjyJsG&aZ~wYfO`iXa{mA|EGXJ;6{yTg5`=0!#r?%t&cc=D0 zebfK@`zZ+8;{~M8Uj%_Ua2zVm4$>^`edtYs&3E5^m*w+w585cZQ)C6Ah0pzB0xE;) zLjD45W=hKs(_UFYx6wW|nDYffOG;;#2eK=Me$>+v`2RQ?|3ugnXARYaOf|IrahD*p z+b1>|2>Ci>;(XTsK{SF%vTz{Be4`wg@)D3#Wckb3vVUXj|NIpO{b};%B|};?J;;Gy zOoK8g;?-YFTQ`AF^&ub?npLy`nGXQ{?S;ROYy;f-(GK>%2zI+VP0;_yn zy*~?~z?pxsO8?>5f1tO1lS6+azykk?c>R6k-#-=5dH&$-OB#RGp5~=S|7?|Gep_YU zKgqZR%42}V>CJx~_zB5>rulxCAqfmVFrfL6f7N_Iuz%%I#@d!hseI4Q|!BI2zhy@{Wmg2x*wti^3_ zo<3pk$2+w@kz-nvu*Youi)lg!oMf&E%BeXt99B5eraNL&zPt3P9@#%5?kJ}^AU>LB z6jWcr^Q6J|DXHu)Tk(gv^S_%ROMkW|M*hEG!hnAM*AWNIy1zC0>}vjpWW%3r%m20swZ58glS|Qe^+9)BQ*G<-e_* zeixg7HTqwk1)!V$#^?Cw$M4@!;QvCs{P__5``hw&M(w}-p1&*M|KvS?H#Q>4hAfOH zfND4fI7R#HIP*v_?ljQGQ4jz)*;E;8wZU}1t$;4)h1GiE2(mqT0`LtXrmfAKL@et> zioAF>?rN#obned~On=qjwwJni*W&##8JoK7jPWQ~jfQUYfqw4Y$ml;99RA1Y&&>Ry z0Pl^(i?Os*!aqO)(?T|o(~L?2>FgC<$?PVDcYl;N{qZRcpjfq0f?5MJXu4%$ak(`| z%XTfY&oEIvtV|};1Xu1g{XS~!IK+|S@pB@Ch%M)Kr_=qIylv)4jQYv zu*19AnNn`GJgDX3rzkaNC=>hq(S=9Dr{86=9GYzAVN76;Ayg>kgquW22YVz!ZbJ4{ zkcEd0TD|qfZ9dw_vo@47DgX?piDDui~5$I8` zb61)OvUW-oIuTwflCU6iDje{=#aD1Z37V{y1u?5#e$u>CRbtJJeXY&So&=;9`OTwC 
zHh)^frutwBSJdBLk4uq=)#E?o@P^xn^TXm@!GN{I0x7T)oCWBdMRAq-)WR>}-Prx( z-A=4r{;}U4XqUrAfSL~p>cQMa9zA-?J{w2un`VueB0!$IsafK$9|4`AJhAm^8Eo?? zTkiREV zkB3P5O>NsUB_Z`$`lG?0qj5qf-XzKP^0g|uZ6~Ey*bQcy{A|M^*Mo_v1Z0eyBIq#f z9>ov8?m2WEUEq`{S}`g&XEfSqq&@IHDEg-^?K-{Iht0kfIGzp=KBufNjo^wCXTZF0 z@WWh6{wx4@U)|JLUAz5!Ir;nKBmed~HR+hxdIE&?n9dXZNjzI2Dx0*5M+ zfkpCw9|6#@;`uQorAEFvx&XfVLm|{kpB(wgMyGJu>gle@i`>2S`MA7$SB*>6i39F8 zw^1=b+bK2{sBmq^3c`))@#<$Sm!-^Tt_+cfK5`- zVy^S2=5s*So}1oSjGD{MBHlo;!0%TLTi>W3Q}fH7T(N>C_gi1W`;a!@Gq#TzfIv^@P;xj4MO z^D~fgmFYzwilSDBxy#|P>X;(R{Opf={lX*MQ4{AhjkcAw-Xv|W7}`8eN{;_p1vW>` zy$9+C?t!@Ad_bQ>LMV#4@;YZH3Z#Bcx5kXjU+`w%JVG%H`7_SkXe&e1;>L-OA4VHo zF%}==7$w7mI{nKtZjVE)TR5U-kACw|dm|8C_(UpqiRsH(zw5AV6UH&0LW#9hV1+MH zmVsu(TU~yn5&V-}fV>$S@0xn3EAXg|7S!$C3;mCA6|RxIT3@A)+V4A9Y1wM0EcGA- z8=+L#>g*MgG+c)0wg#N_&Er36F5k>!7jRCCBNyQB8zf6jK8+rFyyV=y(H7Sd&;^9x zm_O1{z@N@%m1}0TU!bYYa*eP1NS7S98g6I6&1$MUH1<3nxZWy_y?kZbT*B4BaGgMv zMht^Mj$T=>4{W_E6uD8gRW)SC3 z>#RxXb}XwboXug5ofCeHWDAr>s~I}j2gt7HF+KLfPyFuy%%F#Lws_Ps&l zI7nk%UJ(9>n9z3J@#Y9z2cLBkXyqoE&W-%kvo$o17l1z#)w40#j1$HF)& zO)@bDZs2Riu(})&k|iaPXPTwq*9N^&$DGu8VNf73iWB}^SD~{oHZIXAVDu#M^{5B0 z&k3WKM;4%-k#ecv`)V*kFb}RKY=ds3XVLu%1lVdC3in z5NQPy%G6vx4^E3F;g=~#>?nBFpXPk1-NgB&R-S-kiwou|6l3%rA{{-S;Tl5kUl27Y2rVwYDb%7wxkOd9)*_&%(lzPNcYwmZ zwqnG$%mb9+PPON!}@!+5$k~k88ii--M}c0GvSz?w8&i=6D@?sg8=xa~`iJ zr@92&dmUbQJ+F7Ysh#84`Ap>luZJQuE-;_E*>a@Vl zWl74oRq*>Gt7eU_)2Dh30%c^bRMCu)6i-tU01k`>3!9qJM%$LGmlMsoW z+K;tUw9uYDuOfqb#JSGkc)v0C5iIM4Bznj!=h`v4>r1P9o0*3hs!k^UA-!Pt7kn?E z>^GS8Jg`qYxoo+I;eQSL9BSi-Dx|P#Vhvc9zf} z^Hy-BU+B-QDGkO+W+JjRD@xfHX!~o+ih!^>kTzc98g@9AF_~ZFva1nrqDqf)x6QiQ z39inroQuUM%*32uIP#G^MW$da8J`eVFPAt6L&vD7V#DFqxlX=cCaqxPi&Gjho;}Pr z#DCc7ceKtYp3%l!_PQKk*ret|HKoZ@ASBkdRy#3D;vD!SrOb4aEX!j;I-HmPA)pZl zuN8f%y{E?Z$;*}{iXjbLF?FdS@9U23X-5)EC~>Y3osmtSqa_~=drQS`%D?>La>^rY9{aK_U8rg?Pfm2D40!MY07!qUj;Am3~tR6*~ez|(>8FN z?mhfIdFf5=!!t}*XW}D@;Xr1!8N?X$O|20=K$>_C_c)m@pBy0Z+dk>bK4#2E@H~iH zbBArSkOA=;3WuMo06wM=y_xJllXX7Du@%haErc{AxrV=&CmUqofqs-htW}E_TXRPW 
zSoKOx(Lqs*9J6jCySJZc0z~8*5F}t>rGcs_9=HrpOQ2f^dqjPIL_>j-Zz7b;bElt1 zdEz8;FXHAx_B@k&5t^BQF<9djHHVov6Mc2vX+Fb-XLA{@`la zOGWvqRdN!&3ULi?JA`xuvB9}U)C7P^3opu@ZA;2CpO0vj7e3bz5_G{N&54G&u-|}9(>@@s@7(^0vvC*KzNGEwd_)~fkLfo3d-jS)CQDw+U5r9-u z9V67NgHn9oyxXWwyYobBl*8&yTPI(6DTXcKqdE&>P&HTh5K%6bRKz%y5DDgiKiC~7 zGtNY|Uo@l_iWQggt*Km0!rc|VyI`t!yW%7K6s(Ezj09-~T0`FtFObZm6xrD{OI9xa z*!Nng4^xJHwCiu^tqSYzTf9rPehN!*DPY2*#C|g&F=aqYt}%!r+L=JrgWB-{FAO25 zti2^2^SKVusjhtzEuEpNjjTKyZ=h2X1kHZ?OyZ#OtpOg{btGr=IVhND0^o3{tYJU= z;FK#QZsO#_SUNLw@9%xcB<~qt7glz?k)jq8e&|z!R~I{m@OqUA_%B~Y~UoIFfoNuWvRQHI@)Q91{v zw9ChF*6XV6GgevZkE>oP+IhV=4^b?9P!w`QLM>r!jG%}9o{n>)ajj}nTzT5+!L2g} z+t$RAxXygbH4&i^?1$xFT3eV2ZJ^K}w%;w4BtUr`COCn-Ecx34eZcPThRk;>Tl z^2R3RK*NZi3r{qVNU6|Sy9M{tzA4`k4?2~Sf$JXXh-{H(Fi}<+#{!613=X$&1`8=} zH$hFLz(3h;tja}7phWLpb90Se;e1f_5to&%@=!r;R;CV5WQFKz9F?n@R4CG! zf@&)&0+hpBeAx+#98m9F88Mn4hC*OtY8p7{EQPj- z=`RXXy6*f!A#mEEP@QrDE|lE3B7QsK z&IPpKvwlD{JSFW$nJ|tfP+4eKhrEL|+S3d>3{OE-?=-x(T=}wW?4LizH_vZW_v!`S zI!gJV#j~xGlV$fP6;xTc6XiK66=+If@F80F4cDl6a0KJ>h?^W-FNZSGW>lPZJ|%vu z)4xQ>&caTjhu8Ojg;d5o_dw4A*jUYwddr`sMYXibzL-L2Rn^~(D9!=Aj-Z(*v$M8R zj`1%gALa8fmIL-E0thn46iZ5J)? 
z)ZdJ# z`8*pHTOu#tXbpS%MbRpw*dlKBLR<3?e>bZBsrL6P`wY}&Adx$p5R!Q~{RTZ-sgno^ zo3aK9FAv~jge2~%YSz^cuM?vC`XY|`x<)I_QtqE+>wnf$HlpjE?7U|DTY1En-|IN7 z04MjQK8o(0yeDQ_WDeRo2IT!qlaQi?F9~&K(UPA|{T)Btfvp4@mpOw~<0GS2_zDpO zh|vhfF=P~05Fw=_Me%DxIa9?-N*Zsxbt@BZnLPL~{>rpS^5VVR2{17WppMTY_Tkat zPZP<$(pwD!+yWI8rM`2h-DWnnA=av!xVfjv(}ZrS z&_ti2NGcSbd5U>IZ(r&Ag!7&LeJ=5ocaEXh$Yrr7rIXfzDxDCF9}cy)EK~%)MG2on z9xVNkRiW~tYSmW8#=f-eTK;BhJ>-Z#_6{WuS7hthhG~9V`eV1R8ACOw*DHdB@93O* z{cwW~;f8FHWE}CAC$Cbd)cdptlsa4nOBdtvXG+N@%JI9;+m$al-=iS!j3j|7rlvYB zI=oPMhl~Cwb1yTm0*&Z{RLpdRf?W~-!x};{DXDp`##ur!=(|s(H+=y=AwDVjv>aQm zc+S(_3~HtyQyCV2@ePJQ-m%s*D@XV+{S$)+2#bUxaq?_%Z6fGJqj{bi)ioyHKXiEW zUd4C!*~h25AKw)|xxiZpHwC&El!>C5lD2Mmo@YNDiH4I2FH)#>RW+(Cn0q~Ltn`H&2g#enW8UF5-0ekU0Hp@Cn?9MQWO^QD}Zhb5Ij$%QNk48u8wiW=?bFHY(~Cw z6gfl5_3_ZTn_glSix4i&qu+Ovp~6qKK&|geH3-V^>MmEMu9iQXwqlh^Bdz0P%A=Q`(k&Uqe6!#P&Q9^~K@kyJpTM(;x6IJJ_ zzIfdA!^(k)coR&d> zrvOPckj_BIUXaw=&-Q`BoEcFCRgeWWw6WL=a_Lmc5vvlmc|NA6lgm063^$!uXIt83 z)^1Z0I?kui1?e!=fZHI;EkaNptEoV~T7#EBwr!F6^Y<9tu+WM0{W-k1-`M8-DPuB9 zHF0edE#lJ03ucnHt;Ny|rW`19*RWc!^;fkQffo7&nhIw|xJ)KF?F>QK6 z&otAsH`6k_7_*yE|Lp}$6?J6rlBX%cbjfKnG-qt={jT+2v@Xi7xBHMi)~d+OCQ`s^ zB6or0(SU?Yd!yT0W!^uvjG=&a>c;e2%rH8?DnH(k8m@oez*W(?D@Xo-W=EO8ZbGgV zKV}Q`5*(UafkuJx`pp+JuNN>a+#yf8Km`iz(B%?8JJ3stDu|8=k-atiO4MjWS9q4$ z&W0ZH&G##YgWO9HVA5zKs2Ywd&-`@zgw6ndKAaD^POwnZPSo zm5jxZS9i!_JX4M#C{?iFjm^qK0SuHA7v!FvLt|CFY-vc5(}7Z7R{FQNdOgKQZb(>_ zmrB)F5E&BKowK)%aNDaFbhTrDUDGB2&3PHp;KqZ(SHH$#op`D@PprP>xx9L^zfnv^ zY;?ebM^U8x)%0Ib66hvQSd?WL@I;K5X1(U6*KFHS8Aa=uas|a=&BxZ)mYg5#iwG*{ zlY5?-P0iU;DB2{SnXoQe(y9(rzsJ&-B(P&d!S4WeU7;EzV>D=rKeWcb!=*eGI5xEK zO&dNp9gNjFSKD-2s@lzddN-kfNco)g(;C@jz!v#TUc1-`a>UR$`b9hgiDQdEfkBv z!f7qC@~OuC5q6^(i@nXBy-!H9wkEf3lxe=vKJIj%?_=$C2`mCK>f)Jl@8c-tsCu0= z1B71ziiAZ8q2u8~pXV#k1&3;C8L`)-o=dd{mEm7rj6OWPg;LmQ*$`X66Io>M!MJb> zTNK%6a2<|}#MTX*ZOqJM)>4Ay7EAlmuZ+*Dj-{s`mpn_~qpFel!S%GhhN$V2az1ML z%*{8kuPb=KkzUW)8$g{(UrI-bFPvds3NlU5>rD&xaSTzQx{ub~KU%^NzY=Hik;rgH 
zLfb*n4h2AQwXwaKCL6vR$e?rFHY*_$wVC4Pp1elo(>9xi+Y~jUT~9eJ9M|UK&$<5r zg8~x|pxpQffDYaTKX1(HYTe3EOQhj8K_l%=Rqt6ZDyHy-eJ{e6+`?)*fn)%{{6*ywuk&yiCXzx{a}8V(yHbId1oKs5I-LDn zqeYVHA*doPDG}w|k1>HvsIUa3$nMev&NeDh)ZNQYyGTO$NK((9Y4P=bsV9wmGK#`I z^#VtZgYt|gpiu8uvECoPp00Evmig;{!hH}30)dmK!7d%1d;38({0oUc zp*%1e?4P6|f+(X90IhF5<_(is&cAxJm0o$WRbUC!9N{@n?P&=1cB0g8eBTQ^318NT zci{i*v-#Q(x>hs{@P$)d4ns>>gHASOk}cLAlr-YJ zn4?y!66Z40tleVD!=c~kgxqD5)>@xR_tPn>$&Gh!X@2?0`$ko;^tR_?Psd8wIE?SJ zvId))0^$g;NF=1yaFeZPU5Sn#=p>2n>@N)}o98|VDXW_DDj2k)>V<1&_lL0TbJ-p0!LxhEcvkS0pV1D&N+ z8FV!{-?I{}9lkW_SQ1Abh*xa1kJu6)Pc7*>OO{gYa`|ks$?0Kkhm{~;hmUsxv?3Go zYSIndW~?@@<7I1VRcP=MN27EWilYR_S5(%f4zVaPaS@}d^m1qw?4Hw%((4GyU32+` z0S3Ek90jY(l0dnAjZzw2yZCigB3$D2iyXIy;j}$YKkqVN zpopvWuRbU9?|~ z$c_f0+G%OQ2H14FW>!2Oqw%qn{#Z0V*N-a+NzVis5 zz6cxne(X*1+ktyv)M6atXul4N$ABecvNn36g=uD2BVFr>>!IO}(bHb&s6|h)vtp$M zo8mSZ`;AJc%s9EUq7U~SAHnE8_L!GvoNuasS#pf*)gHuhCFpDr%&D}%_nkW9s?8!v znyL8LZ7qHL0S|{(SmN9VBRxFzf?u0Hk%kvTdvuv6_A`b}1_Zy%YQ7j2T&~NMDEvTB z$?hqb^l=l6H`&1VGv*7?p8#q5XRg4Wc^@Vd?CY!>fA!n++qo}Kwq1NMgIE0J9>7wCNZ|mAU0|n3uZ$mWY zC5k6Prl8`n@=U#@YZylc`FK53Esp+q2Znoi#Pj;G9c!ss{l|O!cljgC#O^SVw{EWr zfpMF+Lf#`z&lqF)^;K6SA3d=SQHjpG;Srm!TOJ>$nJzcC{Juc*k5fZ9X5$q9*M)4q ze*O!#{e34WQ#*&4Ezf-`dJFvNpA?c>5Ze>`+G_(~5aOFBOJLOltIE+~UYQ4rPW+6P z%PC%e9qrpbx!q7uMQYU0Z%63|yd)M(muyyBG^$a)+ny)W4V&n>9Y(F#V0lol zy6&J!&A z8rAiVZjw3KF+n~tAr?Z-*TiqAw)gGSya}p(`gkG*6=)=@pay!+ET;98Gn;At38fAt zP}C<+N%DbVj7gaO!Kxr1vE4FuB^8H0cKA4tZU;@Y>j8el?(T96b^ zYgR3@4!<6A){=gl@o_FO*uaEPJ)mo^b?b7cVfNXd4f>xwZ?B)eO?xRr!*OIR4l!|y z0&IP#dH*^~Y8o-oQP& z8$RI%)Kj}PRvvRDOs4)W6nOvZLqN{7$CWR)M z2B|p8Ej2UfEOWINyN6rIL!8G8QlLU?WR?tsrFWbv(jIRZso(i!)>(GeaJb;{EpjbwH4J+PeZ0k&Dg?I3?lWY!+z?Ry3`G% zXF`)7Ys=rUbiTT818dW2CqteiJ=Z&{&wXG8hgRvmi~Qvne^jCwvK;MVm)y?sQmJcf z^wY``yI9$5bI=@E>^HtMzwu2(pMcKt7t!0Q14)Sk zsr9PTAUI+t#ul3c~z62_6!INf?Chww@`yi6(;GFxmu z@_Bu0<84uU@#^Q>nN3Ezy6ZM*N+lda8?~DwI!tGtKr7e!n9()_ZiM>iF33 z8!ZJ_S}NbceY-{P^E>37WD9+_bYMAh6Z2T7xg}P6F$dIiLN0K^#zRbs&sEW(E*5&eGutWt$g}-B*~noo9S>6@5r`1W!f?3 
z5EzobUsrL==621LPuuSHdDP)!$bHRLLeLd3(*aN-W1Kq9*&b`edIBYeu@Q#`1ifUF z_t&I*CUsBOx;|}y`@S`bqhK-nC?o)M^J7 zz(4W990a$B<3Nk`rp-|+qw>sQ8JAL$3d#fX3o9(FV?!U6EZ+$`;}U_X+;`?e$6N`D!0-l7Nq&n-@yrMIXCQVka13yTj&UKRSWZAq3R#Hc~j7$23w z-bZQ@+{Ez?fS4l`v4eQV&H7GsbNg74G_Rfaj-+RqI))-T8X@vdiG6 z6E$MBz;Bxb@NFg?Hfasaq3E<$I45khE-cZ?is?6yX|j|&0=W+_Eh=Tc$wu5PYyNqo zYhlaJQ+n-Zonf=BZ4c9q*#~d_Q+RXx`(93b$Zve}CHU9dtK6T%ncEuw1AuGAg|kV6 zCvuaDo1*ii9HV*Ca4EpCb(_JXK5fyXBPEY)8NJki3CnDD5If^1A{ddxf6q*aQOgsF zV|utj1ys)ndgG!a9hH%fjQimb5@gHxk(a!qW!vMv=wO;dj6v@Ey9CQ}XHBPt_1}+Q z+yx_@Jh@yk9Uah3+(@L{w%WxASX8H@QgOB8y&(>i*txFBF?M&;-rmG2jAP09==+9d z+50WC@921UHS4N&-OEsMabqKZL8Wg@yowWQ)ebxb^1v-)t}u=0f>l>&S*Z@;stqsq zP8?Uykx2j8uCmAK^kLC5f0cyZUn@C&0U~sQ5DX~EK}ccEID45cy#$i{N$7P6M`ujF z%SwKb#t6wuewcMULrQ37`i4dQ+iXT(oPxMfhIHvDPl(qM#PDFTp)brno^;Y^0o}PF zIrtK4VM+5Gn8^n3aVuQ@Z1mkaQ_H%^kx%oeeM7d}#cJoslUXcj)MO{?HTQn29&D3o zHK5QX&D*>%KQ_`jz)j{Bh8QcXw2ZBA26i_@**1n+h1O>=fSvK+0mljg0hsee8ext~csFJnE>86FOa|?PlmAM1KzS7lsm*0?gOJ(=0k0CrRE50pkgg{vC1-tb;R8I*@ zSmwY%ym&E^o@qcieZ&57?|rz)gd@Q{a5z-LPx9HoOPL_q6CZLamY>S=UBPAHDEOwl zYrJ(B3kIlOOHK@Y%1zS0 z#qV==ih8o0%e7G{(;rA|1(jFr(hr~kwjji&N176)g7Tr>_VMUxPX zh`Nvm32M>#t+sg|lHR>G3+?S2YmNVddLjaHqTE>xJgb~3+!GduQZm8EwVE(r*1v2_ z*XvDQ1U(bbDQ(T_MGkw43~xRO|D-!t-lnIHI|Avlw*q({CyWVyz&Qx@(-3Jz>dgMi zhMcc?O*W4U{7Q=3m_l_mjr=~Sd$pn2chjDFWZU*HqZm>`_s8mb0*owg1 z%jgD_6v*ZSbpU7j&Yw)u-@iu}f%xGK z2?Fq@pPMgP8i6kE%BWRDRfIQI`pdU-$82O@o&R5!o%welKi~glzw>kUgWcjv{J4!+ z*T}sMNGiX~eoaIC134gHEEnhy5GcC++R_%Fk1jL>?`BXv7vZu3P|<((A5Q!>oBpr= z5@rhjQ#=aTF#pi0ihp&LBZK6*XfokoB2qv#jpNht?_AVd*gyODcOsI%*;aq7*ZuQn zvMaDokhBxMFwVTmYHopGR6?4ZdtSGP1$#!{aZ{lCwZr44-qsP)r(48AG&ZLpUu=8V z*|?ifTJ~o&?srtK1<&f>)`R$E8+n6rFJ7+238Y%%ryh8P`{XifE@Wcw)I=o&`gzL{hcEK`c;lnK_0t~WaN=a^BUkPWmFZetUvh#CQ z3AlZuT(#{!tm+&A&jSVB?Z>d|FmP^;G2uFC6~uE8*P1>sNnd#CHH|Mu)mv`Q(4|VE zn6I~8U>>e)-j!rFlUb@}M1ezlbXXct9xW(V*Ae>h(8Kezp>wQ@&HGCQXY70X_r9+@ zwso#zw?GwNY`;wmaZ!xz27*YrxPZ(PG~)G^V}ZpS%b~@^Zy-(iVeXogY*)3lz{9DM 
ztj#gnDf->POTwilZ~MU_kcNz43uo|knyu#5jgW;_7Ap;%7UXbeh`Mq_W23P*<&~eZ zpf|#>f$#XytpZbJ1aodKNWBPKhb6%UFX2+*5|QN?8%jU=?G6+9k#CjQk&lNNuMWEG z?A^M~Zrb<0u%e=Z$tQ9HV2dqgMS=x}ogWND$XjjujFlOCy4j&*xC?+N4?Z-@x=GYm zUVKw#7rfI+a%?VPAKdnsml<1?M+UwW-~?Y1nhhcgb7CJtQ^=7*=^r82o_V^~&Yj7h z^|gOEQ{S+(zpzxXo#6Y2&S-xFOXz+=LE12mtOJ$Ik-bR;;XA;CcbJ)Uvjhrq50RuH zHj+H1kUKgQ9+Qc@7D7FtX5&3W?+ry)0A#J%?h0kptU#_3CX81f1ttrD2G!f_^#=pJ~4I@+Vn=pnv z{n(wsi?tNCP;$RS1st&|{#I#obvQb zjj*T%c;hU%poZ==x{nr}VT=%j{Ac%zyd3=|=RK_&&;@@Pi?g zKCc(gaKlgLvDTogv$7D9>2a88mTFn9ljV?yY#q@rwQCDFM{iEjzOnsY;Z@4ID=M)z zf6+0ry_^V$PQ^_UsKO~NSLh*b#t0GU`0F34#{p;-9<(m@(=zQ;_Q1o3xtf%9L2#Bc zw}QjZ4hDTBS~V~`nd_k9_WY54)uQ_i(Q-vW*1Fc4cOq3Tmb)B+?-HkLY);sxmi_1B zLUtu=5d=tt!nwa-PrTQb`XQb^=j0$q_o>&xMyW4>VG-r1$|H7eYO}-J{4$k=#lP~C zE*zEMi`BA0%Xcn8+m_nG(#SQ9saR8VH0@M+B>-B3BOI#cO^1&59$C)zy_7@lWAD4^ z^FZq7m8r{%C$haH9)JXx-}s(^xE?xqk!SASyQBAf{dnS!KG$T$pC!4kk6n)R-^9OK zFWYqvSBH$lxG+q^QuA%ojrRt%8Gflfzeosdbcpc{IeRF^&m}#I5(nSx8(-q1A=}C8 zLotqp#s9|FkkzJog@*6UQgxdr!Z=0|5rGv!G9=Hy>cC--{i8-ST3247o$B%v(Ro zT-s@)nPf~kF&GAIg}37-lsg;T8+%|W?y$JUAE4lGr)OFM+B3cHh#QQrD)?|!1HHr z;SE<-*D3G5jw42BZX+*UGWjO^^90D}L|}G7?QBm-nUV#PL>Zm!R;FX$!%}AT)8@J_ zf66OHMn3+WJJ@pUfPv1zotbdGzHifd&9^v%Pf3y$rr#8!!HC~KZ`;8B5!w=xHR;ZQ*5(eTg0|zvZ z;cVRe8msr!G-?f%YNC81+~<+o^yJIwXf@QK>>-ykXZkWfRBYt`Nn~Y!WQko?>!E0h zaArL$*=rXxR0gq;gG>4?76Ikwn|#Njo334Gp6cA*dsg{Y%(@38O6H{MPFF+?xfex3 zQu8<)o7=oLFi{HuG+ahDWY%R?B*R3+b3Ca9eI8s}YrDkvnKPBr2j6v=-rD!{nSI@q zIA3QZ-{s_mZl)1WRETacDL_Be9L`*!kh?!31s$KFWqR;1+VBtU`Z2vnNj{~P2%n@~ zBOaAZnnM7ofA#&64_ponaz=Cjt@nWK^{dNiaMlEw<;K1Gg8Y-w=APy75V377IXHzpJ zIRSUxU?XNXGb|2kdA@O>iDZ6L()4Rg_m#f!&N*CW zC2==!UK3Igegml}kT+XQtd>u)ln3BMkC#4B^|Wl$#+$dPm1lIse93O~WuTaylw+>UN|#oYuR+nqN`Rz?IZUBhZJqk9Mm z5g6F>n;wSfh!(UF7!Ey4SmbeabG?IJro>3F;r`7p z@-Z#7HrFz3bN8<(4RGYsc_J^EIS61&7*#WpJ71olyMU6bDKS-x9xI`tfalru2|+^F z98w-iSZ2SLefd_+_x$ofZB5v{e7;Wb_E@mud~tz@WE4n;SjQV6Dzxs#$Z(4>X3PWq ziNsW%+G$sm$7dIqYHv=(-opAO#ZwO9pRA1!Uc0NJB83Nm)OaP3wQsc@P4C443R5^K 
zp~rM>E&~xTHMvA~4$&TQ2pG}JM~6*o8)DzWcsE%Qj%GR6I8O4x@FJ$9vuZ|2{>-F^W< zq2eZRbdV7NZ9`tS+JYS*ieqBvsxVe66vb0!p5!SretfD@fvI07Eh`E&HQw?~A`P`; z)(70Udbs*NECf6{KX_tOnIfz)d2a!%t3u!bMzHW~6Z} z%8?=QtpI0`ym5NJV3JF1eM7Fu_#%I?bWEzw%`HDpX-pz0WjKZ%F+)j*qx^z_qGWYA zO8&;@K5i)xezp<~QVvezznHgV34t;}cL82@4Udk_@V$}tM#M&z0gQ=>Ra)tyIul1h zq_@Js+wA3dvduwyev#g@n)k;$kB$=RCC+{*5|QYCdvU6Fm&d*a0((5`d_h9fsU2Gd z)@~9$2~c;;{@MrIvC~BP?}#ZM*4ZptX%bdrKF6^Hngu+)jbrN`m0HPo?!4Yr!{Qa5w))$QFi?1@`FW~+24YHqo#ZDVK+l9B^7hi%5LdzM} z&Oj%HS@N4L%yIMa8~m6vk9yLDZoWM}^f=`3l{|_w_%bIjeOrK4Z&=f0qItoKAYhfx#!mOS_`Xe${kClUb8?CJ2`v&I}38Z zzNS`Vl6b@~G!}%s1cM*K_ z+~~qtCaSt!mwO7R&GwOs?vd}m*Mx)=XGXty=HVgdz3Jn%w^pZJ@-aSO-57F?FiSa7 z!mL23oPrX8YCG`2W@}THZ}!wO?rj_otNcZ>_QK9G>6i}bjK3T0z@wCy6Fx>|eDD^~ zwiBy%4kv;UC`Nm9ntK){Q^}H(Hl?6dKneD=+$rW(JG#xnll%PN3H;nDqBmjk+<7lO z=p(a{O70Bt3PIS|{V2qBk4b$sBu!2wO-SjR^(te!1eB7a{1koSJmmq+Dl3awy66UY z^o@E@QY}KXu%sf(rUug{UgFb0qc0JiF=7fIa(DalcS+w~(RjsvDy->GR0pokrvWM6 zD7>zBF}8%GHHh87InQid_(C-*{hEJGO>B_x2Ho*-)M4wD&s71LM?5mlHLD!@`tD7C z-DW{J;~Z*%{Uiv{AiOBdZ0=Y)RZb?m(-duQU zr?{>a2E_jhU$?eWN3ZtGszRzd3vi|e%MS{mawVgljhR^ExcKvfi@G8!-}^2Mzj3?M zeAu%fVq@1$7le-iv|WDDjUF~pKws)YW{_*GUQ)?Q7{ncG3PJ31k^KAPE8Ws|k2F#| z?1uZzHGA|{H^wPu7RJXZ8uK&4;&GG@2+(4*cLB`V)h0M8k41jps*V>%mi65yl{M74 zHJZH5RzY(k~B?AJ-Rw0-=gfkk@Us8w<* zvtmZ`e9iP7Z|CcXot>s#P8H1pk8oy9SsZC$)?;oFF@TtaXy7H6Kr3CSI__N7Q;Z8O zOjQ5o-ggzUg`>r<$i6B5({BFkgZq4Kuuq&V^9W-Gq%5AO1>4~qLIFUf&w0|k0drmz z1(p6fD1!cYyv3v(7l_u=$`{HNOUk5Qf8*+tb4u?*M&*f*3c{P!)Y%wZyNi_}u3ZB` zscwa%(r0%xjT3!|scR=|18)F!QImpB7ZIh(lv-=i>7&e})z#8_)EqxkYD8IhXtUYk z9`-)$EKUp)3u=77HL_6apgFI6M6#EZr}ps7r4Um^6=U+b>st8~Bg4camG*4iMq7oS zAYIdjEzLOz=Jzw0UpIfqg9h@O4nWcD-5l9L1707^pHROr)pKQECa?7l*y1FdR{v!u4osx5W5?*mq|_zwSs-UX-&=Ac-~!WROwK4Y0Eo; zP}PT&>gf%iB$+iiN{aNU1+?a|m1ZzepI6A&8~ zbe39ZX{|25r?5fgjTG|rHV{KNwSNaOA75?Dk@I2Qf#kt093eLblCIMm7Kf{AyvEx$ zpqhT33=6GePt>_R-en{DlNB{KrR8>8tanGq9rEUk_`NGU$#QO%l_7S7EQ#F^goj~Z 
zm7G(Iuuh_s=XtCl<5ZsJW4ikxxt{e>Htb10yQfs`&BHpRki)?Bd6)zXb_un(X=KNnYhXYB@oGgnOF8IuT0fElYiW1AA4@briO~^>7I_+b>Q8nPqTYRBue31zr&ziLA)a{8i9LD z4z#TdDMY(~M4l7eCLAR_9|2ku5IXQRPpg<)=DKDz@uwI|8L#u?{Ex@( z=d8a@5|*-L*d<@^xmgzr+~M_wyw^l2giqF_0c|ePN&~X(@{)dE45eSt>nVRc?WSES z9u-v``*`Zz!N4>Aho-hCC`(DL2Ijz} z|IYQBlPK?Q1vD3t*SAT1v>X0%z`hzuvSKI}v$p=a7PIeH$WADk`;ZuDpakW0G=0kg zMzAqid}^|8sceGu^9Ed>VlNF&rl@BIxD_UF-ie=jHVWn~HcgV{wxZnY{I2s4q zxd+vM$8qE>c%t)w4gt+AZ&kp)CD%^#q=vBDFm`pGM_0RMHFDt9Xzb93{M}hgafu#W zwHVbBmySaZe4?_$f6{@mkOz&kkKx)CtPC)hmjUzyzfRs%3}si47W1cUN56j#?#>`3 zDQY^Jf7Efan!NF5-#WwnXQnSVa^%EV3Q#v22aQvZi}B=I;S@{fpcv|-;g)Dd=!(hWo+lB=oZJfng!_q%-DmK^UI?WasU?50)c%LJx;}U zf3Z^ky!^EY&P-0oY_~} zcqL0QwhcxD5~Jc0ErkCFwv#BcJm(tlgS;eqK%^l1{_a1TpmjL_NfO|{U-a4`lx1OMv}hsovAmn z_InPP!IE%3NU(5#5hp`Ms$ousO~TvY_5S!$^vJ1E6J&}!Z^z3K-!XsjtcVZ$@-u5@ zI_!3-vvpUD)``5i3L9RUv}Kiol@O2l<<1(TakQR7dRAhq29wZ*l(mn5rL-Ii^LgkF znRSW?J~JK{`QWk7mmRElT$;Y~-L)_YHWALfL!{K=>V+4&f$(-h7NFKhxOeF)u8w#O zs~2S2B)*D(J*{I?AM{6FXsDLUe@B{pc-ue6bldjbzT^y*T`S~UsJF9s_#{2E7*0~EO**Tv{oPTX>Feywc z?ZH9YTox*r79F@}$!SD?Vsa}SO1|G+=I$2QIDXSx;`rE=boMgiAd8{tld6?w(uON! 
zH4MUM1d!cHhDa_I5CRjyv+GViFTnEuRHm04w8%(8 zB&cm+maOI$Ne&vQS%-&+n~vovMb5mwQSmdu$t~JX{pS{eKE7{fU}j*{4qW?*R;_N3 z{cH`P*oM%Xy)a;9*!*ayH+`J%S@cNL9XP?y?1oPFZr?EDSLFRwfZu;2IKb*Q9VCh8 zbtA!<*S;USn>TU-o!%}Fms0M40ZxE8*3Hw?-1H8+v7CeBc~A9LD<22?hdfK=^GHpi zKM&!&24?;Xkeiro*cM+gRB zvU~I%XTCktr1$!4qGacf<-ch7aoenU{c*z*sPwb< zpyyb>B2@UGak#`m)*5m#i#ipewmeMcdCRB9`s##ljOWO% zvN{2ug))O&ms`u+rHoTX572S~i&UDCVqbz(pQdCZRkADnr}{38I~{pY=+>5k;oNiO z*5W8n5WnJK6R38CBrY&3Q(r3Y@H|wh0(V`l3Udc|)S_n3wXfzKL%0?fBu3 zI?aY18BmOCv+1;eLGUP!aj|sa1yh$!=16%^@QE!jAi$oYolL7lTFAP2r=jnTN^~b; zZQ5QLr+8cRmP0oLb6tn7&x&OX3I_rSUk4H*VqBpZW&$Ok4Ka!BwGB&0sTo5i%^@dT z;v)# z(}fboz#pR{x~!z*&0_ca#Q8@w2&(xcd-Ro|`ldh6x=nOOS-k^~>xKoP);fd5_e@xK zG=3Apzc7Lo%TT*pYR$c#&z?3dFC!~%xCOV;jau#y&-xxdzEpIJ|0Ng(1Uql-!48sH zTL*sQd+-S3@C!I#Cie`0<~TL8n}}R|rKL_RQ!Dc2VT(XZ`$*MRe5rdwt>KHNsO%xw zZMnHlGw-Mu{K_Ll3T^2+zAhLpZfVbO`BgPaGf2m-gI-b<>bgOHVfynIO^k14;}NkB z^fxE%iGWRxO%x+?9u&ULX4YZYY`b61$vHh3Pa*>HW7u|FmK`Na*X>243%Yqq)y=0~ zj0S&tF{&?dN4dd%hhBv5qEdsHyXj2-)iyPc3S9enTo3`$AusM>4DjT#7B$#rydf-% zne;qYX*Pjkv1sJBL(Va;G&X1jj zbLR2GFlbLS%L!_yW|0zDSZ*02051sP!MQ7CDPg>(Uo08-aQb}XI@uYM3VVs?yJ8S8 zUuI4zaF^G0Zn1#O10_R|Y*Rzw{#Z(YWRZmqB<9j3XUJ7w#N@OxQHrPO`*jQe$$f z1;=Nl#Wwa4s=9jAO=700&yePz6 zWnIalgHUeDU`nO_Qt+11ITE^K zmf=Vb-Ir!3hd~;IyO+D?!69Ujag)s>I3`lc480BM;%5d>{tct^z?_A{J6}J$rCfBMk4S)1u#Z4`4T1lhrd;{#Fxw2X|sECNV(^zZ;#o=@h^Lbzp>vD zgy2b#z|ejG_O4&$NV&8(;-y+8IF`(#wFJ-kQ;qpE?~>QeOQx5dK2#8ved;4fDnI;6 zbWuTLT}uFK2e@({@R)$BCwQIP(BeP~<2#Lx=s~H~UFE@{kcS5XUObB@Ioy3OH@j;t zhV7u~B>t*GZo`cBPy)E58C;%-orE)x#0lF2ARh=ZVL)Rw(p0y{9tbj!p3nEH@HwOT z;(2c5t|^l=zA|5df=4rrPv)+Q0`+dk8bkOJQ&5cK$jOTXuC4FEfI1ymZQ{__H}C#n zRtIg+PM+VoU$8o+gimbWa{{0Y!jq=D)eO0DEe3352zLqryM)VvD`?@Qp#i72?zoAP z`k}?K*S;$E?23H*ZvS*_%AM4uvtWR&3_L9sYG#{ad-3(4b|T|OLn#0?hSXHXFf&T; z?7ls&H=3F`?BgTpG(CNKASXN5S!AQI#nqoT0S^$kRtqF*hl9!Ca%>p#*DVETNTA>q zjNvJ3__P)gcjWEQV{6y@XgaWinfFd-iB@M;5+ ztmGhJBwIKcV-)sL*KqxnFL%D>e)5N3c2lX{x!T6Zw?2ANk1fuT;g+?y-G>@!L0?SY{%wY4Yqu;)4!8uiEAIKZ%G)3U%%O7-oh{`K;plUn zUKZUWAq_joA~E?knr 
z7&GwTG2>8@JWX_7d#ibEQ$bID-ZjkLE9KKj>834#16nPi>S_Bt@2UAIpq{Iq768#) zNXx8^EWsN_FfPewwrXMfL84g#8^PK6qN#{BcPPc<%aNMEkh32>64g+75tse*clQYA zWW3=!h8kK=2r_-}Xl8#{v|eV{kY)Jkp0uUVu8k_X>q^Q$tQS{;Msxe+o^qk5jr6_Vjy98id19vJu^OKl#f^OTUZp*GQc>@GGB%NI zf}H_JV^{_e1O(w2F{TV9b~Tx|p0j)S+Y)CRtwL46iS3e7qb=1_! z4OZI7>7=nXf9-KxR91^8`XVTWO7h){!mM2kIL9DM#-2t0)DaI7EU^%{S8 zvx8G{j-GC)vExWP)<3hrGL54NO@RS(xy3|EIav&JcAOIEQIm%fD7cI$?3RXR5d9A7 z6D!a|vbEdbyA&2@FiVC9y_Ze5*WDz&*=KdZWKDwU!xK$sd!c=JB6^H;n(b9!;UZ*% z$SObdyx@}1Kqqz!j~$I2M1ZIy zNsL11F_1u~g`8kTyc~Tn$cd(!4DERAn|I~K-Ozl4It|CrcN;~V>ft2pIu7X4g_p&UaIcpVC>wiA=KVKz)N!ZD)w$lBI zUk6Y-oC3PhMy~3~=P1Ov2Kk+Lx9Gie`Q)@;LvXdq@Vl0K<+;un1-k-&jCJ-haqB?O{C!e&%6V0)eRtiUc(vO%T>Sy zg{tQD1B>3DxEV)<@JhHjayt&v1Assl@P%Mq(8zCmRy+`&YY!%JE@weWFM3yh-2hCR z#|0}~G)LwK8@@1<#R^&$=D|~$rMy@U@X6yRanKL=KTb_b(7mXv-=_gFHVwlARVT>B z_OeYFOKoIGV+T$WMkyG^ zQYtahua4Ri?tuD)|M0Q#9P8je%pU)5d{xikOR>O$9ACiI9zskKUcn)(P7nU)Dkv}! z^8<0XQCug}3Pd6Yi1A=wdl&!}3hd5}86ruLzpb92~rF8<7Iz2DkmjcjX%p6P5^Wu#?x1-3OqAzaO>u9|X#jh{yt$ zQBVvaUIPWgTY_}vS%}GE_`>)H*gp>WkCFjz@yBiOH}e1A_8J2JGorXY^t(94xFKyn zx&O%-1^Ra5_?|z^C=M(ML^Drhjhn?f2NvT`fq%C0|89{G!F;L$BRUS(249UXsD91HPj2r#bN)v$RIvToM#*w1K&3r|c7?2Pv$-a9s-61CL^cxh!P(miQi(kDITqTBBA$@pGRlQ>}|8-ryJ~FvN7qU-MhEh+juR# z;vUv0x&`*@zou&Hyp#iAzce?{G*@kbcvk+?Di_I9e;Upu|Jt;;syqMrU0?j)aQzrT z39>~3VunSJ{qN7P59L)&_Lt7u+>3ekCdusd^+V1ngx=;b$hff!C0X@@AjUD_)>`dE z%TrxaG$YE;M|IuY3~u*~%hbs?Fb=yJmeg0jQc3VY{iCM+dyNEy(eP(sO#DF@gCOt# z>cHpP5R4-OlX;Gi2jko9&$@$(6|isrvqYqRnqg&aW#I&Au;?wi^5| z9=i^IJ>p*}-P7bAn8?@Jy^tdv@I`V!BHFQnlqvEq``ePSgp2e!P?TrRxL z3iX@{&%QVQ(coXwZGS%Ef9bZ>R-hcm$&5n?NV5nSXo6U7etp)u3FtvfcqKa7W)M{Qq(T zkV;ao!;vaNkN_-D%OJ3BFkQOrwSU1R9#fHuHM1pCnlOxu6o zssjh#zi-_B*BF-n`z=%4{9o)QhIB);o`2V_>^Z(@kXM6E?CvO@{gdN^6HT%13j8j4Y*BAgkzg%ONFL?_%rq4-e zj%|WQbTlEvcI2BObU-%6#` zuFy?kmk3Yxnx}0g6zsk5#}XU?C;|9`|4fWn8vF?$ox(UxAoy4$lJgM{**oy{ODSe& ztEtKlcbUT*+|rgKS_aq%Au)9H=S}DbSf8R^Fg;~|L?Z!S(~}n(_*m{lI0Qen9_=QyZfXElQ%QSQcOtAx3`*ceU 
zTlcm3t1|D&Pj;}iS9T%OOBw=-R@pQHq`4p1V7*-VHYv`LVc*u~!2qUQMR?@ng2)lQ z;@sUX=T-{d>VKPfePqL(_kC%*x{u1fBs807*X0h8ALp*r&!*rPGEUq~)~^oSduk~Z zw6x6lOsHuE#7J6tmk9A#b!OgGtR6ayW{g#gcPQu!v3kExL^?0~l-*NpwKMzd4y+GH zDUEx16rNcTy;z=@XQsbqJn}uzNafV}R@JhxQ>~;@s3K98{i_9iK@X}%xWhmg*O3Is zmY|AusVPMhQd^DwVvGX3j2v9QOH&yX0aP1Df&FC70Bndim zepd1My)QRz%m;2Q>ewS0rStZ!YAQ+6v$n|o=V8l`<4@%pOq^CXKYjV*%IsK9{m7O7 z!QPw4L;d&t;wq(*kexJ@E!nas%d~tELY9ziD#;eI55`PUcBT}vPFa&Jd$yVEOCdWW z+X#_mMvP&W&b#Z}*ZFPb}vJ2U=3-D-Bfpsbdx5HrvND=vKtl{-^95)wSi4laVD zG5RMHFVuw{qLXRZI6Y32zr~687$tbHKldVPz9{~L@=_I|LyehpCyq2Xr&Ub7OtT+t za4ZRg;jOniGEV>T{;J6h)AOU_BIehP0dtFyVbBhQQ=`{&Y_Lk1ClI!(loo9No2^BSVWR-Y>9t#fBZO0QHZug5zl9#F%o0Kf^nZUdYy#|Aie%OV@V8s!p zpOGh)GnLW}og7gf3C01U(-#W)QaQv!T6bLeW8-)B^{sX-BG==EN0(j0r+i*|bDd3euX^Ff!1etN zmJ-`-{r=BH9toY@@71t6X~B>|HHrG-o*)mBlvYW}ErOs^Oe(9Vcna<=!zp{s>*Bpz z(19Zlc_32!57k(rd%WV!WbLhM7X0m38mcDT113J@Cphq|3pb4JtmjQ&&Km&WvyJH) zX%tsl3#jyKW>pI39^U^+%-~S$@eW+izMlQwW&-Q>DA+I1PYncgCV^M$H1wFLC$O%Z z`A3^!W1rt#p=W)sUwzata>C;-UPmFMP|^h}$g(NC#RR9pBi9 zCR7szI6pma<~2QvD0?5T*n6=znQQjja&58O2hb$YFbhd>;!P6DErykrTBhm-2 zNObQzrda826SI+eJx*_UK!3U0Xh~i3`9$@%ZUJ!x50hBsn2KQ;kHU@`%Wr(#D#VCs#^-cF6lH#nZ;QfkrE8GmR16X zCA-tp#fwH_LDpjI(_d2;QbW)?eLEM>%&0MK)FyAKT6R3xOfrCU);ACVeu^{|Zd`{~CgceGF9`!2!yl(DdvEv6-5b)tON_ z|1aGUN0Ug|1}akrC+FL#-ty`9u&ba8V&*k3jA)D&oiE{gm7%72Vxcy{ ztuNuwgj+RLEdR<6%!lctpaSQL#dBlgR9F!ubeG?-H3{z+TVp1A@oloCU;;PqbtCzt z6IIW=JjD`Q+Rkd2wzdfi`J1<(wP}|KZNy>plY7Y@yeDiY z@ZF*|QKsZUMXoXwd% zAj-5u)Tp*Yz0SZJA7yhm#I+djWhcTacYh%=@6pn{wtp0U{B$wEm3CD5emh+i=i zEdS8mL9t=EJ3mBHox2O)$&yaS^4&NJOFG|vsZ@;)aM=w|;|>HPOx75s(3q5$R_J2l zN>0svap`oDM1u3f4*|9d$Gnfna@xU>CqG!WHSnlSx#ecmue-szx6c(M-#MjineNWF zfCCkR`?|iBg2>t_WDVpoLWWdto4_gGzoPX0Dbr4L7OeJq2${XpB4G6fQObxo3IO$I zMq54EB0Ug7B(qj)t$gNpuQs%s>S`1oJ$fi=p%jV2E#)9v<_xr>GT?);_6HB9~pU{XK}l(Ph(%Gwif`?hP? 
ze(ri5(P+XgF}#p#dF;~zI&KK4fa6+*L}aq)vX1PCyR3VuXc5+5JWn^uG&QI8g^gvu z{lvIENYatqdA6KZ3PJ3x21kDjbsD4jInEhEwL#bDi=^lHDJ#s`se|O5=P8dfE{Z-L z554%2Nl$nIBvG05>(j3FZvlt^3t|a?uxJgAp?lkSd3ri}ytV0#EgBH*Y7&@w=+&#B zQ}w>(Os<8w>y64Re2{_P0J$a~nXH)qY|d%F>A5=B?>A~drP&dLoOXyZ)fv^KDhqXl zJxPVcRiXrz7#hTqvI{o!>fXUF&J@l}N$#SA?A9hL&!cMia{-ot;NUP0&MGkud{%}; zLbx?Fi9akzTK)M;suo>JgX#}0WcoC2CO>;|ajehOb-pdd+*$Y7GPT@YMtH4e+dbsM z(rYQP_6zX|;ct7Ei%alR_+1Qlzdf3I1NjM5$FHDCLEoFTPP(~8@ru?})YnLKW2?QJ z#I6V0-xw$jH|}nX*sdWJqT8_GHRWCY_LE zfQ!=5M}xlAYzJOTi*bMGzrddSCu2|x;)02#Y+%GmfRH^FaTN?1#u(Dy7Nbr-fdd+v z8pbsTf-W5->d0A^xd}g+I-4UXb3d7KMbPJdGWi#L`^h9i#DXDz7a^Vd7fRN8m$-_KL6|tq?j#Nv%1)bSN=fcpgr6&9K2`41p{7=fA=s93y}ZRJh)9io zURhF_pH!cjWEqniUQ}ov_psh@DDn?W9!6BD0FbJyJ5un zLeOYiU!~v8O&#X;h5T-3>#Xl_HO+;bjAAEbJ&aVgi>98YpbP=Mq$4Ku0=ZPwdl|)A zm7{u3??`z!*-OK@Xwv&9)3@g;L38QbKjcD#*h5jE0bkOy&E2eRguJ^|`uH|)&;fMd znGFepK#hpxThI$ErekWhU%ti?aL@)e(#Q5`*3GUWgz_9aH^sku!C1v6MDa#uuOw_w zfI|BW*TVy#LDK>5(3YAX>i6DHhR<|JDNcXvIoo7n%S0ci2#ee6Bj+Vv7e+8!JSH8> zw@l7ZHX7YFO9uXi%msmuk&@j?L$8q*=?yDIXgvE9EZ;CfhN_Ctv4hPThcUQ%yz(b^ zEChMl8m8UuknlLz;)w{JMgb|S><*B=KU`Rl-)`UszIP;y7K5d{owC+ICm zIT_W>xq#&P10AFTeQKqj?$QewXSDM|fF%nj-LGERIDqp%J+V~tsU3QFQG<^JX@#-* z)eJSLm%3dzkS*%YI=M7jj_>`}%4YJstgGtHT7nIOqVJ!C>B@9X zi`H8mS19>vy#1{32douRx!79D>tZ^#jNxeTD!6$qSFEOP)FUriTl0$4)k`d&qZeq9 zId`|&wZ;<#uUzqY5d|Lv?liosOBf-X`w+h8bGQI=SXifWzZO?J!KK#0wXIv}%gL-% z{pHbs;fnK%=4ZO3HH6b&s51hCzsv2c7N$vGZU_mT0tltL8 zU)6`~EFCFsI6~`e^QyAONn(`{r$(@QX(l5LB7}o~T*!-ga4XqmN6h~4)rh(aiH=&a z3M~4dDh_cq6LgO~PC;BMMjzYB^IW}+lAEA4t0e!}4ooX+_esBj;-@UyQesNF-onGI zhLtDvwatTjbk+KuEtmhf_XaV};0n#<{EJ*4IPhbmvqHI5PWBt?B9 zb#1+d!4B$GQlVpgWI-aNNmzWTM0`X}3_Ij>@*X_v#ixMa%7G5F^TE=GGQATb|a z4H}uXMwf;$dW>Pbanvu0jQyU&(ln_8no;&{jQ-G`=Lo&D}`966M<-pY2UvN5=Q zN^YdG`eaouqs+%^&&g0NUGD~tgLCgn4Is@g-}blY#Ln><$=i+~x<~2_m9tj{)gJ8h zoWD?>Zu?@wzplD*EpxJ;91qHF&Kw2y%F0>P=DJuAjK9-ZoL_0Vku2Dl|<}@zO1{1OItZhjalRH z*!b~2sdp-0({(*mO9MVPo0&ACB}7V&Zh$(8qzFzM4wuNBzNe{Y_2mdrt;4m~Rw6dv 
z^1D?~89CY~q0ZgfMLe^2dGas=pM56y>@!4(yisp230t@h)rGpQ8cob*R#8O|QrMy5 zEjeCiEs@W8IOm+{`5YNG><`S#L&vKJbWf{nmZ*{wYM+7&UO`nNl6`Kv>sNm+{%(KR!ryD^MN^U%hwvaJN`5edgg6r<0F*Y=)6i@N9ch zk-aFEB#sLtsQc^2+`yQm8hjhqn!OprxBr2BSo`^G)+WC*>m~rwfiEAQ);RF(sd=A+ zX0~ULj9b5y&Co8C!KyWZ^9ZqlY_gUCgV1jy&K2I#abdL8(BIIwRj|FF#MSg{;gl<% zx1G_f4mY2-)x-10N9;nvjUQ#Q?>Aso4LF#K`UC!c<#K6cqIvPB31FEviv05O{{n^e z&p7x`{(~I+J>RF_8Mm`RXXI9cXHeX3$M`Eh3XO&?-;TYQ%+cjB0Pw61M+Kaqn8mw& zM^hyEXk5?^j4;lbwKNiT)V&B{kVknd<@ElEAOFeVz+*fyK`84;RrL8q}|-U zhhjrs4(ivQrTk{MGwgN7PIRSBJ&_C;%+|@RA^`O7J3~- zQc`6`BS7!bs71gO4;Xf+=c)Qig>OMZ*;Y%RMkmOx|IqF_BJl406zHObZ(%@(4e&tb z92<}a02IFt;xpd*S<)s8fBtwmuDoo z5Z=;eotGAJ{(Ws_{cwpyD8VH5>bKq-FWq4;V`tznmb)$5BjkkG z;_k$fqScV$;?dmV&(wUf3=!1^ePYi~8cCahvQc1!>1K5_kLv}~Z}{+BZob%eou_|n zcV}@OeG_a6L+fl6U_aa)fWJu76+wV~dR-28c!ODs64lHn%hdyBH+AQ+q zq@+%u zyX=vEJx3=jW3@wgF({dG`&UQy>c9DLVRj42xf@>@K6T`D z!{Ayb9^P_|siF+65#LDb%1 z(4dN2LamsJ?NA03$Xi>4iQAl%jh(eHq?sHr66DyzDf}h-PSUM|Q*VUUj)%R!d&{y6 z**kS~7{mqTgi)@v(?n=9Olv&5I_Bd0np8s*5xs~M`EVs6OYfC021wFI;WR0Jolp}k zM|+@Q>_sr4yQLNAzQei^ALKPqBMY#_qH2=kyd+v-!Uh^otLi&< z=@aVhV_zPpD45SU1|4VDC&?pNVv7ZR>E-jWcdD@Q>Ki$$r8b3`PfnbE%_8ZoUL%oP zg$t3(f37=F~W%hN#)s(6yvE}lnH>!k`Z+G0u2~`i(b`>q=qj4Nu^{#C^ zd+~hdg++bJYIJRyWj+uCCbkN^1Gt<%?*bH6E^mo$DZicp1hN_w&m5E!0O8G){Vc7@OI67z zvbAn`KC}YtuwBcv*qQs}Lu%*s8LRm<7j~Fjmu)_6XPQ}N#4gOPm9$ndZa(mjVm!;D$ zxD_cr)Br0oeB+9LvQ~h4;3L!XbyZ^^6Qfur$`F4u`sMC8YUqJEu`akUcXgxQbKU7t zd~1VtpvC$$V?PM4lwP0vj}GsQLP=o|Hrxs~bkkvU9(xw`e&8n)uYs2N`DsfK`05L*#0mVrLAXbs3Yuf0(eBQ+8)Ky0-<$ z2BF}7r57c{YuBS=K+bO49J<-D8$H2l>ihk&?_^G@%TE65jH@~iypnn#88G6k7l@IB z8GvgJ`N`x9lh?uY1=SFQX3h;sB%JWf^QQ#Xy`OX^T(e|i_?s2Kbb!5 zlroOXLchIpK~Y40GOeXTXPf63F$X}xKNQMXD$PBU|EEX8sH{LCVo*CEWT3c!qda*3 zlPRkSSgbSXpGI@LZ!Z<(yG6iHrc00=X;dfl9f;WMfCS}p=npLrPx@_k z=)al0+miknkcY3p?Sjd7IWR8K!@wf;;b`EPrTl>XK0zhaZ_@{?F1owJ86fSW%P@d! 
zu&*4)F+zSaeeI+50U`9;j%5G7U4VB1_S=Zd|EUp6f}*KEEpyX9aV~OyI~Oef+_}K~ zZ_gGwQnbzpg>F~@G6B@B`JYTb)?JPg^QBE(?&Z1&2Kq7IoE=%|bvf&5J zN>}#Iz59F539YgShp+|TaD($n3Uww^# z)9CuI&Hbx)^e_HH%U`{x|Ji@|vl_Yy{IMfIO{F&FVyKV);?}KG<$v)LJ~>3>Ytj1U zMp`kNDtLb~xnFI_eg8G1DM01IZf4?K;wF3ZEN~*J`db@84Dp7eCwmo-IG$+{u*{I~ z%d(#668ZeG{EMuhhp)rEQGejqWoJc8&9mS7YashSy*Zv~$#%W%3ML{opYFxTsuLNh zosLFr7%Rhe=jd+Npd^vysHBd*N)zpn+Y3+rN_XTvEyR#rf-+&*DES?Nn^+6>yl|~0kM@RM}xrwOe^>EZd+D*b4gz77VEy=D3DS^GS zQ$?h|DEngnY(8{0qakv1-@=!&X1K@SDli5slVv5Nvc!2m~g2Ez6YMZ6B- zfY+0=iE{BYKdR=(S5{54!j?L2peSSim#mtJyA9#JHxktBx^8O?_`ldEhO~zhWLVdo z+%~tAe79DRIVP}|(^dV%nw4%`x$dewakR~HN{SLZlc5d3fWP}JLp>7KTY_TvA3y&@ zapLV2xaj#({mgxZ1WkkXXp|d@BC8v|>FA*P1T?Th4(k<9pY3{aVXX*j%Xt-B=*s*y zC^UaGLlawFU5A7YN{nx5EoR})0FrG`HaNW(IcberJrqc~UA4|f3DBjq5$r_tiJwxd z+A@>#JM9JL8xMJqD!$+{`GS+iP76KIQ#P0xT}988jt4UiMp5;rU8YVBD;v7aV9-^@ z5e)ueKLPAupqs15ip;C06;3`^$O~tWUfdXRaAq|v6TVwnK|4$i8iy3WuxtYuE*KhT zz|3@LgW?R7&(NI^gO0X#wOZbuQa0vtWqrF`6Epu&G6m!sCHZ-ryI04&MpLPmtMkQ& zzZTtF{k%h+SzjH)@S0wXqtrJ++73M)I)4i{v5(QVegw5t-Hmmj=T*YS<=Rn4wZvVKhlt6moTU5?ye2mL z45c7@!p*wDTBkrzBT57oVc;9D%Jlp=(xOe`S(q(j@=Y}r*OYqRZj7Fug%e{)BFsp< zanw_cR#Y`_MTj!FY(`*ZvsB3SsBua__Oib8nuoZ^dU<&PWc3SI^zObn15}G0hzYrB z@l&)8S$e#K~|Ns z7+pw3iWf=IoM`+Q=Zuci(&{hX63}fkJTYNCOc+qU+W%Egkaoc)$Ll@w%ejqeNsU-R zx+u6SFoe}8@;rF2CW8}R*g86}9KB)H!FOGmBfU_LC>HtWUx z5+7WZE{z*{opOn)>pXpcAw*Le#>&vlhkfsnxbe0ZDg`y;6Z54rEipa;=WeAuxOhF* zSa^J&-*koLs`4BSsAh$jR2}g1JcrLL%>j4LUFCUEJurUCHyM z_W0;i#sub_Q)`a_IlmuN^rFV8ZGuO`NCt-e>=DZDkXCucZ;Ug8kFn0-I+w7jM_5Ek zlldeolWM3ZQBB&)a`?PHLUIc%(w?P`;z)Wvi+u^R|7`!&&c^nNv|;)3h{<-={nt&s z%mv{Oe9Zd9)b)0OJdKb_w7C-g7gD1_C` zHrlR*!^^!o@1HxbZuOwM8w(;j2S6WQw=jeKC({=Yv@#BQ;yqn~aLGY6wZJOHN}{TK zL6j!~uf2df9?0Z+3tz}g*@;VjkP)Z z$m(ekyh8)Jas|{7$S0FCDf5KB$Jh%Lw-UrDz?wQCIxI_rzH#MrJvVu?;`9!A+vIK0 zTfwUvm*=))LZ?-xa}dFEJ$$nPVampw@EG&*eqT-A@pZZNburFMZikOl)SqD5ixmVn z!Wk}SPA@@uut#C459H?9QBcj;vjbUx#^Iy-m_~gXU0fnAXDH`NcVOS)mjH_~;ZpbN zy;^;J7YUs4X$KEWpY?7j2!ZcmB_=v^--w1HftBA6Acm?a0+Q^U7D*q^v)s?v%Pdag 
z1ZW~9@M%j*6$ph5Hr_tvIM}|p`h@37GecmI#xMwUT{pE_5j5Ct2hB%7Gaxgx#X7xD|P^|E`>(@g) z1tMyE>jEtT;BpSqqA7x!B`O|FCr{`8sx5Wt06XN%Xa~St+bzailgjO{sb`myH=Lt< z=lsYiL8V?fp~iu^cc9~3CpUS#TJKNc6PCzOqcf%LeRvVFJBY_R;V7cGm9VU?qUBUM zw1yq2w1puuT=n?yXmwCtE)TuwMfiSeD@5n$0yEdaUbi8Xhv(R06UYHv!$sjLO52Pd zZ@|-ZN#V6516^W3wA|Vj_2W{5-R+=?N&=-vL^>W3&wr)~@n}PQ@cXgpWH-Mx%L==` zp|d}r6B6WD<8eN+EvcimWv(jLm%+xw45eu zs6ohfCy7-L*lWgl2M4l}Dhgl{kp&boq%=)kjrqDl)f0Df%2$*v;HP@AOTJKg!Ns z5sh%zS7sAXV+$G%q!~lVBlL1Ci!+X+fu%s$prvj=HR-OgY_JPwbk~utVNo4s&cWBr zAjwIOMt%LnI0l4Da`sQAr6bBa5kwsM#R&8cdi)CGumoY6sq453z%9%5bD!#pO$CeN?2GjL$yAj)oG{jV@7|BA8`c*up^u-MSr_`=*WgICfRB<_=$X*l zn25Ue#<$K7ko{cKs_}koXC0gZ-cC4^US;kPh07NY9_y?!%lOrIRjyvTnSEROv!PVsHLN|a(NTel z^jLTVO~^+?4~+N)@x($Gqkniu0{Vk9eI#?h(j);8&pg3>vVA+MJ6UN-V0q#p3MD!R zM5cXmpEI#lG15U;dS;@yc1GWzngC70oeT-GGCe{X<-E>9$!~u~3n4CK=Pp}js?`oJ zq*85y3moE|9iv`O6l0h6NlWs_S)5DLPhx=-)!WHNC9Wi{`fOpNX%*mV#UWd<{NU3M zaIFWDrkm;(WA9PQfTt$(jYx@Uv)mo-ncR+9EfL>XyIrsIyc;-wvYb3OZurOUGeb4$ zVbyNZZiC0u@ibx5_G6?h93+a^LutaixZqhR(~_$7uX9%OI2%hnwj7hW!#18N zv7!9K7gp``-0}(f`61{bSE(Pmj6=!vY+?uFFkc}&U>4OpjmEH}oNyd)?R8ltv@jgW z=rHcD8Y|H`(X*C87JXNJ#C2RIQ{~k}E4VaL06kBOiT<8(7;$X~D+7&0IOdEY{utdX z=DQSPztCscFYmRe=X2bDEp6>?V7%kgvOOHKOlrKRy1QijX#2)MkrN ztpwq9;c&Akc=J|Uv*B2~=-%r^oke>Vk6di}8t?W)YL+E*1?37TG6LG&`3O!E8h)xD zCNO**v&C0#)Fl|aaT=5^F-&Z^@J^Z}z4l7p5zo}lw9jnTawU5PHD1FwC`>SphOi@K zJONEN3Dbn5WVrEebET{wvvCZZW;=D`y!6h+a~C^u<=n;Bl9Foedk}rz@4&{?QwGV8 zMz^9+PM+hVZ4f~O-z;@pT9$T0HA=ccJ%8$>0DW-`JHS7AW-f2Q`#dn5sURQQbW^#M znb8C#Ry9b>6yn(7*~X+q%ws>yQTV8Fl3HcKl&@ZYk|Y;uZPH$ir~B#h)#Yr!!SOs3 zGN1w6O6Fb)pXck2BmSmvscZF5|6nrnQLZ!eHiAZe(Ua8rIvNY!&;pX7=wK98*jJ7& z#of4N*n@poH17s60_ATQuFYwDau@1jMky$E?B@Su+OuQ%@x&QQ%?xhBm^?_? 
zf_4}`#z3%2a8Gc=V`LFLvzqKp49JkOFw4dGu1v{A9~bFclOS=cCYN+WDlG0btY+Rl zkde&pdtZr^|>vD(z|m=q&#)Yl@<(+LLx$gxV{SPCgu)$ zBJv!3TBl|NFzG~7o2_ND3meL%lT~W2uKNkxxb@KBl+$_J&@Y^@H;@YG%4^~_S)It% z_XI>7?u|C5vQ7S2HoH0TQMyp9CC97sap%go6t_c(mPy-<3kff#-4DGm)y)D->YwlZ zl}Y{I`sCTOUkRQ-};NZ>b1U_#?NParg_N2^i0go6j;w|kL}22owF&k?k$=2aK~{xt*4R!)5yJ8f zSP!)Z0ErDg0pc)G&Yr{^kShCpR>o-EWVH131|fP~VLGJrW7pU-Bc5TO^gn9F%8evw zLX*TWd*b?v#`LiUCqJ(O_{5xSmLK)|#8WIH>6uP-^;P|cDmx|ueE1+L-<)R?j1Ae3 zI_y`d>8UFC&%K3Gi7apixP*UUpWorkBcdbm0Lj6tI&JEVRvScaQlKr1 zN#hivSI@kSJ!Zk3>+=;h1|=V(JRy$GskVE@Km@DS4=m$2kczoaGFCBFfeXk0Drka9>f`gl!SDTniv zEaf@r@bbYbcN_iGn-1<;1KsNC=QX=i#2yk<1@cn=C?l(awRb za%T*|_CXyp9LRn|Oe=esKjz>(+N^w#rt6nCZAf@g){x|K_tDIUs-x|Jd_7;WUkirA&Luf@^s#T`CbesG>4d>eQPb9!0$(y+XM5!&dY7&VUQVY!Tm|;xq*Pl_ZxtbU9W+yFiNHz< zfXm1;^`Uyb;8T6WdqR|+)vH+bCl^tXn?g5`ky(-bu}B*p$!dfX74TS`eFaDzxbj=4 z_aNm^w<*zmt$1ufh*Ijc@(VV_ZB0p2R#R5}N$^NZ7MDFkA+w{ysF@7h0fsaZ=+TNc z+#{qkCAo#6&`50jIOcp|4I{d+czxjM9#3*>BAm8C58MP?;hRagVr7vD55U#I-S>#!ei#D){22=LW%FY`BaaXZImie6?18?T6@ps ztHqCFbMar&1=2XN0s*%W^M8p#Y`LMvP%XRuXhNI{1n+Q(bcXU6tNGqVqaMR)aJsQ~ zhZ%Xh%F%@XfYu)-WoR|CT_Nog-Estoy3Gl8f*cF-bLw;lCM>88z=U&V!TAga79hub z4?4I-49jJHVWx5q*aU`)rkxj?5!1P8qpq9w%Ue`h!$KKAP4YK{kd|mrf}0%Zv4TrA z;k7cnl-^N-Th^b8Bqf|7!>)Z*S2(cIdFt*hICXEdu+sKW=Dlhy6u3riG$u+4Rf^HW zivvjB-*?%g)0$PETfsdo2J=$#YWrWH%ZmrPjxlE{ci(0$8NSpQDN5g?&69H{tM`b^dg_>S*aSLplw|3iIhbDKN>1(-4a{g)ar z&qgdxeD>D7 z6P@B(WIAZ}g#J?NfE9=zR+NTnL6)#K))eR;RRfPB>;G8i)-RPjQ8JrOEh|lBK#)xs zeyu$;Cwk5nG#sl!L_OJLkHKf4qZ^b)RlH$_+20R;+YS9EMU*zC(fBo5)(L3vm!}(P4(hg&aG*FC(zXjzdJCTB~Mqx#q`_D)Q=X@J22<9FP zJ*qJ+-5mFQZ~Wu1-7;IY{FSB|8z^4qJAvSQi}puDzhz6g>xNz5DVzOI^-o?+`6M4V zBY)-*T1`T{EJ^tML?7%sQO3NYb{66d zBQ{dX?{7Lpe2n`c|0CJ8dicDWBcP=+z&W6fA;+!*mEe;MdYh7-i-{Qyv{|AKR_TUEClHOC9J|w zr5JHKsuIH2=0iu9TIW0>qNLqAX}ydG$5pX9rm2V z8X+XDuGPv3JFSsmx4eJTLhIGYk?atdI9%-)H5Ib->OoI(^7INXOtj z(_oJ;Kw9!h(<_5Bnk>2lGMiSvVv}R8yGb?79Ia0a-`eee*j46}F4r?C-00afP1I+P 
z(E4Q!z`x@v8hl)n1tHPydF~E3Q(BeYy-KrgA237)ZR3Xz(HM4l!Z$Dy2JB1;fF1>qFWYM)YjXV2{L2rdl>zxn6^Jis_kfA#QBPW%~cf}A>+f8WBAn-Pgzl4 z@r{Lr_4~QcuD{iJ%N-c2Cwi9xS@}g2#JoYz!QCxA@Eym(I8M7taml9f4JjMcH%i#Q zf7kGfRi&!-X_r`IgROPv_vhs<$De-csabo=cxYlU*0v2%Lic|6q=@RG!8A|xHd4<` zxMi|yDPVDmc-Lc-UozzH2!0I;jS}T$c&v+S=5x#ic1-RRPM~JqK+4#oq3(dxBd>s% z+>B2O%cq1Bmon`}3Z?sRvRBw%5lwNPd$waW;o0f!mF@>I1@nz)nfH7etQ(B^Wuuzm z9QuwSzhEkuwOEK%|K|`Sumt(n)@%%pAXxU~)A*oN>|> zSLGi;qRu!)Ysk$`M_D5ugRI_W9~ZtyC5qsFzH3p`M}wv3$-u>fzNSV)-}uhdMn765 zqTomMY-^xkgY#eUqY1{MN(`7&?r4T`(G>a-7pC|Jdc%Y%Lsd4WN30r; ztwzg}y{{MDj*`ikV+j~BH>dI!+Pt>y!C+7ap|?HpqvRv~B^sIu+I*Z}r(VwIe^~XJh?0`=etsg+>lXP1q)&&t)h-(GeF7nRSAw5ZPujzTs z2Jrser(GH!7qrMv^Dc@q&m49FY<%cr&LC{8=R{#udHuR31~Z%35z4?Pbova z3>j9>yN-TX{5VmcWE_ zO&v17+9po2N=EbIp7NMePs-=K&>|;S&QHMf7@u&&YW9{|p*abfR2iYY!G{~k$`@f2 zJ$m={KF!hf6Q%?U(X($Xw42V_HS$J_7-~-*Hg&fGPU^td|HobZUEls4r3|}-0A6-N z2&fYog)F-*;&)kRJ!#J;<$n3XX7&~?L^-o;_S$~n_7j93X>(uS1HM>DEWic61Bj#Y z7z5Mf8wiu1O!>iYK@9PKM%&x7y-njXW>|`nAoN5SFqZ)_$mk{=_KE#p`|peUf0bZ3 z!P6jP0RrY;6^Xt_@|W2URJy5yaNx#1;4=H{4x(Eh&Z>YolU9%?V-NUj?XQOH{1`+L zk3tyY(NcqIE%%{67V|*#`#6+lCBmn1GE`_tWa7nO-(Ad|5 z;br2Xb&n;?;yXJ}^XW_kq@Mb5&{)x9vO}Pc=fWP`pN13%0(O0wX#jNl0f4PnGBMxt z4}N&@z!++=&k(?{pGQ$rf1jo{)9;gUo?xf#Ea50Dz^LZZ=4Nt_U?4W9Y5O9;G_wD^ z27%MO~Rh8=-A zyT!ag>>)uu02;jxhL%|i<7DL!ewt2Vjvc}5j`^j816J`@x3WImU;TS4ZUH(*jX9QhYsh>S+ar9R zbK9zV8nUcBPCpP37S=mbbns+Y=)ppzzrSVRru~B{YCrt_Wc=w@8wS3UKLc_^;HD^` zVjyg%NDq^{E^tvYyZlaxdsVC~z!d%A?O#SP?P$CwU}>B5%^Jg~t?^ox~2fW0m1I4c)MyxUUT&!V8Kp&;}|7hb|%60zin)*T|!F4mq+T3Ci{eh`0UR} zOgGeQYuz|V2vy#nG8qqwMH2Kr?2Xyu@DC1H8%-T3&C~P@8m!o6sm{2gG;}vF7tXM2 z%*~J6>{`sg)kRkDI;`vuR#KE{)$i>7Hlt9U9?tUmKvFErpAUZapI^dl_S>QQKYI1u z!YLoAMl@A=hDeoXB#fhhedflHBKaPPx6v&(Cf0Av$#;@s5zvmCV3{pl*`lgV(`-qW zF}_q-`0m{YhFXnx6I}hY6zwaZyw1&x`WA+Hngbb*yR< zsk0ye!~Pn8Zq&)5i)i{O#WXp`04PX~oG!Zm@o=WqBk7yl&vZ@q2P9tn{SW*vtJ!}n zv;R{x`+vPs$>jHiUDr@xA@h_Bn+u&czxY;dg=O#i3+oz+pwCkXwhb-zxr02mwB`)# z*-lpwnqvC(^`D+`vIvL*{gx~~jK!^D!Ct^D{%{TTg;6<2I(c6ID?^=vY6Z9ukU1H- 
zChJ^m#GH)rWo+eVKcS2%BMeHczPNI|k{6%w!r`G2_l)58Go{xSp%;*#?4k*RJqZ{3 zgC47)hAM%dSZtbRT32T0qUk-y;n_8&8Uq^ zxhSCcK@QA#WO$by_5xbDxLOO6^oQs_nOFo}r<5qQ1Ra36<>&c`Uy;zmJpPpLU;1`fR1y8eh-&ozRq4^4b zK0YMCybdLcAGZ2>=D)S~`_6|NveK|p4il_@d?MCLgG62hc1n1F_(8E)_U>fK-e(f7UAUEN*PUF9FC zRKeNjWS_nET5EsnTWe`(cilg&`l#^Kl~rO3k)F%V5k$h98V(CUDO|P{Zweh%S*ruH zCa!n-qH4?boivEoEH)avV0%gRiJ9*22B>|zqF2b!esUWOz#EP36lQvO3qr)_2rM=L zAx7Bs*+_#UPRjMDiBAj3cQ_;65-9&h(k1fNqej2(%^Cq)mQJu?qD+(+7<)iy@Woyx z`~^;*Dm=(>E)Btou%jKJ?$t|We|*R{nW!D#k!43u@qphNQj_2D;cj5x(+>&dC=Vpv zV%j!_=*=q$BbR?AH7Lb}G^@bFTUGXQG!oQmLyz`rJybjq;;yr_(>2M4X7}#e3%9rP z_XJ@?9~6x;om0y@brrYTgc3Uv=gX|9CLk)=jEG4d`t5LKW&F;fJ0XFFZ`5DC?$^9! zlIU>Vb%z$_gpk1<2~Ob4Q>nhrpHLMuH92Iav>aYPpD&W!;UD85o;Z30z)#MWU$ zYV7^rf2{7X<_4XeYEn*nCD!e2@*&~0Ae(~4f?g?-9A+qzRozx1DeU=4YPyp@HVTDX zu^-Ik`%j3&pV_GYfxZ)UVACP#52eNCB1*2*Qo;lSRAdk;2>U27f+$~(Jrc=Bb4V=p z^zO=eC~409Fk0H~o{&Dka5Zf$(M$TApg&!`wcIq`1|FAkCgK~kwd|{-C9d!^&DX^*B z6b>P+0M)o;F*OAGzHITTlHcY`ow_>Abr|*WxvcKCrQP+n&D*EFGTzIY3(w4&+44*Y zgN!Q7Vj@qGZ^vQ87{b}A&Lc($+L%|cKZA60kZv`!Xt;7@kP>kVL_SbBfVjS z%XcM0d49yk3${!%{x%CsXo|hu*4&ahpP*BL<4b2VvGTzhRMW7So_j7)MAEZ=&Vx8ICwCSCxZ~GPDg1^;fL0A2&Y^gLOQg zIevuRUQjK#>KHmuakpyX%%DICddicB=-hjPk_JWmCCC!F+IMqc zhIZ|#YW+a9tRFU|+LyHVE4qnI^z0Y)1J1l9aWok{`k1M{6ki*9y_kQFGh7)2P#xuk zfaoS=+pBetIt*fNOV8w-y>nk-x4xWBfZR77x!nEr?X@UF3Ui7PIjPREZGwdh)zA_P z2-yZXK8o|59A&fzXKQihNOxW~wQX9)EM3ucZTANJ7pox~*CmRJG?r^2$hV}nBF9J; zkflURm&B=eWo5#i{>aCHIX^gIiHtFmnRDzP(J{$8e%P5#8gS zhw=>vD|-zuaX&85oWqTBbKU1gPoJxEKVvTY=)(Ey@Y|8jX+eB2K~yY$9dOgu11Mt* zF9((I_Tz}Q;msLxWkaq|+B6*ln{fYWRKNLAM0NS~!*_Nri$2_*Y)SnXSJtFdSgX0c z$_P*z?*q0};|04$Vjmz^j}P&3;* zeAnz*?DLxt60|5S#V8ud6C3pazY!a6zZPbw5yZj(;7YbauywlG-n+7GI@4KDb51NT zX@;27DORDbV!Bd(?BliDMxNlw(a@n*#yXTwPYk(iKHM8wgg?pi{~|<;9HuXvdt~zR z(8P}K^9H+g6ZZruiJUWYp4a%&10f~uYvb3kw{cC_%Q3gAQmw){tBp0GPb(}|=#6sq ziDdBI2asLoJBIxr~)212s2rzp-66#>ZV8rPOlAg8$3o2U% zi<{xDb*XscyJRgN_e&yUyCr5Zhw21?a)CueE0UJ-Ja{LBJ_C6eEE{wb0iIQ=tv zMtB5&V*r3Xe-PRBJ#f4|l#9*xAW8_2;xw!Vyz89A$tNe`dIM@(T=)1 
zPDJX7K81UX9x>_{B>I=c>kCzei;RxDQ)CK{bQy761}b4tX zE*ExXw8WzUhSk`FSRujUFE)t-1oGZRrd)MW%kJPJ=g$UTV;ynHX=6S zkS&B=LwJp>GeM!OdS?q|o90aPy>QLRAjSx*?Z!IXcc zo>Z7R#?gqc(+OlD#GsT$B3AKc8Qo{qyLhSi)uHuob)Gz0|3c(0)h&+LXic2jD@box zPiO~yw{7aD84`Sv0UI4SYq@mls?YBF3X4!9+KGL+se^~}JXea|KY7vE#9SUD%L)=I zh+{$|bb#_cR*=6rTm>K!(h?`O@* z<-C2DdNE8m@4m;xJ70@MhBOI}+?dS!dUaLK`a zs;UC-S_|!>(en|U_Mf0*uFiG+#a1SAEWD9OS2@PkUo6jLG>|y(E9{tYhY347I?Y(+ z+2(#)xSsvHtI1B`&yl;UZq9`UOYA@C!VML?Bk1EqxqJYjxB)DFn=$0FH*Y#Y#t}zq zYS_cxHc53wjXmU<6t&jV4P$m){J7)jxY!rF#h+KWb?lTFJdwALf1$TdETBr|*f3rz zZxkZTbj}7TtkBLUFRHbVo{fD=oH};wSS!JdK&7ETj0nM3z%Wm)Ez(~=4ajwlI84hW zG)vja@`jgm@RE)aWpsA%m51_;tEw_Co6@o!eHJipC-qg8N6Zb(A9&vElX>nb*-ZAm~1E0q;a%VuDqpjU^t~Qj$JBkbBdO;m*N@Ifr zH2ZSj0NyMU>M?cFzdH2Y@TpgU_o)YoGbskfN=?=q_Q%~bU&BILZsCWCQDn9a2tL3} zIO30U7HUp%v!+enmJaG%{z79)8?}bv#^tW<5N-dmZpj%yp<*}xQiKH1w9OhZhWZT( zWYe1oYEU_ce(|6K0y7+LJ&n|Y)5fz!UM2)*8)Mt-n7MYw3C$|KjrJ$z0`gYLO4>- z^9B0??NgaA5k!Y^AP(Gl*yW;n^#q@KXq4TCgD-=swEv4 zf$s*bJZsqw_5w=75qsenL%wJUV0^BgY*5E5mnQa6MW$~&k=voW0_TP4wS=}FYd>Lw z?1j^heTN%(&x&QHZ}wTIy5AJ0nh4p>5#QIJ<_S06;bBMl%0n{^X`zUA+%lIdnMUFyqwu$eu>6`xK^|KqFf^{E%Yi3dT^4pkz_cFR4Ni(ykJV?;sr{yE*)1* z4Nh}-m4rh#A1!~Cc~NOg%9ZYiN_gd0cSUY{R`X@6r~OA36JPMp36g}nTMAfT$GYL^ z4kmBQE9^iRL6cnug^F-|rp{9%2v(D7uRfiXTMj+~aKM@2)i@=(65O#6q z8wC>04cZ)yh)tf=HG7M&cl5bAX)2cE0m2JWowrqg^gd$#_`ShF-VsNRq%e!Uc}{?i zpui-LO(J1X5(4g_HH}sFw11`7qY&XIht(-+kGA7}*=nUCXQQopbG)yt>MUa`ky)9s zc@QME08LY&aNiFh>o|d%TSiOc5Y55$ukL&~K3uih=_U3qT2syDiW++N=;DPtRYpEq zvxrdy(<}7Q$x1~<3`p6=h6S*&OHW{s(Y!a=_7TVIs@!qBA9s+Q_0b`wSQS{`)3; zCCpB|8WhOVtn86Gi!EOMCUvVW{n_)KM!j3kt9`rONV=gF4KDP+6M-P!_vPDwbA%{V z+oG7g)(jiV*1mb5Pm8OcW}$Mrn;hT8NcFlE&-&v&L}^7Q+~yCQp>>b@6YB2XafHwIH# zy;#a~$6w}{9vZvlv&yDnq`y8Vpdi~@&y+QHR6eomu*1!> z`*7DJJJ#3s0Dq5dUm^v<&umN7I({&lNWvPL7vgOwf1Ta=``6ZM7FUkVCW*RgzileK ze8yll1r30tvT5g_K<^uZet<5ij6i2$^z_j=Sh1M7liN*mBEavn7CG-Pv6 z^w0TwezW~AQ;%eT%!*Xay{l@E4n&Zm7hzB01_4QEz-tKJG#rH7nh+aDT7)R? 
zi|7OOd9+V~0jb3B%y^o)!cu>aqFAbV@-Z8^PM=4m@0|%}z_?H$ov;~v+wv_0IYwLg zH^N(wqv9*S#srVV;kB~uaD>%M4n2mrIJNnA^?((F4X?#dNy0H+^);W+nu`oc z3bO#Q#V~{?LKtSq6@SA>P+73B23?LVsSf2WUD;k<7k;7YzC&Qm6@$P4y+?EUv0KD@ zpT36}!c8xE_BaezvybTU9WrC*Bf>eKQv-_fpx`gUZ&1nQH_y+i4SLg#dU@gO+STGU z4d?Bw<_%bNXs%JsP$d@H-NWC0dcnSTnQu_!3@XnruF));3m>9a`DI*ffM zh1~G6HB^>oe3mMd#*{i0y(5Ua8|;{IxlrDvRn=f3M>r1TLHc*OXV2`)L0JowaUEc+R58)(o%{_;Xsk?)Mt&=SiQmg29{2iTS!NhDD|hbJxNX>NuL>Z5x~Kbu z!>H=ruGe5~Ct`2<3;Ixl1V63%17bu`;RI$U${RH*F`Yxs{e+U?D{yM3R7&!o6K(RT zS@zhPVWJg;m;0?k#e~Pq~L}1t!WJz3en*w6fGC>^Y z(imJ-kmK;VswNuB^{@Z9!eZGb7DZn_Ag-EVNR5w6FM;-w9$@iK91W02_v)%y&6*9A zm_J6>!oQqJ>NYVZ7a-?>$S@;_6{u!nEQM`VFTr&fLRNagqMeo5lcle4)y)buI^zoV zXVHO%KCXc=9Zns-JFStA-^RK+Y7QOM{QVYqVe+VJ38DlVru+%HVubp26XeSp0bGl? z)VSpW?F@}KG1ldMzh~8?N`+WB1ckS56^niI{zJi@n;X^x&qu6KKq9CRTFC6P44VDO zRB@W`)Pz(kE&g79*UQ$sT{#|WqE`o011cl_6v^8vcKJiD&u`cE?I^|wB|E@qr`Am%D#E0qD1n|R;A+y zs`>ITZVzZR#Q+Nj7xrcB?!!w=BR4~Po93H`X)>j(6MF~dIH!d40h(8oj-%w(37Pk0 zsGm)kS*OT^z{ekty!*-kXLsr#WXUrZwg{E6q5z3H1bwx8_r}S=so!WU4|iKV7B=ru z?t^f9+OaN8`sqd8$rAhxa7xp~y9wUZIkB&By?Tw5(Rp~Cii&rf>gc+FwYY4L)rpr$ zlTDLSr^`AInPxxheBgLcRh0}(^8L8?^IRJE_UFf~K$(!fGK%Gd9a%v=#ZU802wfnp z3Gn?W(EePir7yRJT@rmA3gp;_qA+Y!ympCyr_&zEvwKg}O63l?o+=M|a5O0Tc3#p4 zHOII&vu2azQsmN)X><(jYa2R}A!n%Z0&k>`#V+P-P4fJiVejIi4A$^+-^}`p z@mnIzI+2}^ldK5=f)pS|*~BW+Qaw-RGh=6i3SJjd&Hnt;KknmN9!o3EeR`PwF!PpIS$d|X@WGW#hLsRe!ufdAJArD6_9zksTVo}`T zRj=1a*KEouOvb)6p%RQe6i_ucU}%0X-pW56z5RQD?M^0x zeHBJqXv0*e@ZmjPoRIfiA*_#}jB9G@$0~lNk!e0hLV~78v~{LP=+vWjw^I_W$|~9e zO%>cty9kklrY-!t2~&}Tqt1BE+Gk5kGEw+#9A z5t|IpaMFaEJDo(Ek6X``7MZ&R5|z^nb+r3iy{0vN=JrQR-q;;!`d*1ev7#`^^&B*74S*FaY(#KtO54)`HI4#FxVw=kF`E0P>L|OuXeg=n z=_tDW^S!tCdKDLm*D0fDDnlCBQwjnf4E3=Rq!xfLgC}Ow+)>xq*-wUP^7nEb9yM*W zzk4e^y{Ra?G>E2!RfEJLe6U{^Z{UCRcCmf>B=bQXcBzs6rPJQSOj{ZcPat2BPB}*_(`1_aT zb9k77Hf!tVRU1gVGc zyw87btZPWO-}Ug1XDp{AgZ*35-|iAza)SLmH-JI?`?4L;%@Z3mxZ3m2|8)MZm-3%q zUfS+`>%tCtr} z(@9ze(ONJ6yBwhYwfY70GjH#fXJpnO{uYCO38?#jDGvr;{`sZ}-ZKB8lx6*T;D5Ho 
ze!bFvQcb^v=0DZT|96C@ppM#>eGHNDo|ErBwoV+xzMKaUOn1_A@7MZ7D--f$ksrC=t21ccdi5h8lfb`=~6WD7t* z{@X4g@bV$mAy9YR&Ft6ZzuwF*A^5dDerbYVI`Wq}_+==6`2@ebrC*NW|KBb%9=Q7d zV=AduCn(l|i3=`S(&VgB5wex^P@@3{&RP_bZ~of&I-Key-8%a*W$UZ+i_4{?#QTu} PyVfOZBwdvFbL_tXtBIRo literal 0 HcmV?d00001 diff --git a/migration-guide.md b/migration-guide.md index bedec85..81cab25 100644 --- a/migration-guide.md +++ b/migration-guide.md @@ -8,21 +8,51 @@ The purpose of this guide is to help easily upgrade to Azure Cosmos DB Java SDK ## Background -| Java SDK | Release Date | Bundled APIs | Maven Jar | API Reference | Release Notes | -|-------------------------|--------------|--------------|-----------------------------------------|-----------------------------------------------------------|------------------------------------------------------------------------------------------| -| Async 2.x.x | June 2018 | Async | com.microsoft.azure::azure-cosmosdb | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-async-java) | -| "Legacy" Sync 2.x.x | Sept 2018 | Sync | com.microsoft.azure::azure-documentdb | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-java) | -| 3.x.x | July 2019 | Async/Sync | com.microsoft.azure::azure-cosmos | [API](https://azure.github.io/azure-cosmosdb-java/3.0.0/) | - | -| 4.0 | April 2020 | Async/Sync | com.azure::azure-cosmos | - | - | +| Java SDK | Release Date | Bundled APIs | Maven Jar | Java package name |API Reference | Release Notes | +|-------------------------|--------------|----------------------|-----------------------------------------|-------------------|-----------------------------------------------------------|------------------------------------------------------------------------------------------| +| Async 2.x.x | June 2018 | Async(RxJava) | com.microsoft.azure::azure-cosmosdb | | 
[API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-async-java) | +| "Legacy" Sync 2.x.x | Sept 2018 | Sync | com.microsoft.azure::azure-documentdb | | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-java) | +| 3.x.x | July 2019 | Async(Reactor)/Sync | com.microsoft.azure::azure-cosmos | | [API](https://azure.github.io/azure-cosmosdb-java/3.0.0/) | - | +| 4.0 | April 2020 | Async(Reactor)/Sync | com.azure::azure-cosmos | | - | - | + +## Important implementation changes + +### RxJava replaced with reactor in Java SDK 3.x.x and 4.0 + +If you have been using a pre-3.x.x Java SDK, it is recommended to review our [Reactor pattern guide](reactor-pattern-guide.md) for an introduction to async programming and Reactor. + +Users of the Async Java SDK 2.x.x will want to review our [Reactor vs RxJava Guide]() for additional guidance on converting RxJava code to use Reactor. + +## Important API changes + +### Naming conventions + +![Java SDK naming conventions](media/java_sdk_naming_conventions.jpg) + +* Java SDK 3.x.x and 4.0 refer to clients, resources, etc. as ```Cosmos```*X*; for example ```CosmosClient```, ```CosmosDatabase```, ```CosmosContainer```..., whereas version 2.x.x Java SDKs did not have a uniform naming scheme. + +* Java SDK 3.x.x and 4.0 offer Sync and Async APIs. + * **Java SDK 4.0**: classes belong to the Sync API unless the name has ```Async``` after ```Cosmos```. + * **Java SDK 3.x.x**: classes belong to the Async API unless the name has ```Sync``` after Cosmos. + * **Async Java SDK 2.x.x**: similar class names to **Sync Java SDK 2.x.x** but the class name starts with ```Async```. 
+ +### Representing items + +### Imports + +### Accessors + +### QueryMetrics -## Breaking API changes ## Code snippet comparisons -### Naming conventions +### Create resources + +### Item operations -### Create database +### Indexing -### Create container +### Stored procedures -### \ No newline at end of file +### Change Feed \ No newline at end of file From 9f1862eee03fd14403bebecf62c816ac76fd16f1 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Sun, 29 Mar 2020 23:36:01 -0700 Subject: [PATCH 087/110] State of migration guide --- migration-guide.md | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/migration-guide.md b/migration-guide.md index 81cab25..1d828a7 100644 --- a/migration-guide.md +++ b/migration-guide.md @@ -12,8 +12,8 @@ The purpose of this guide is to help easily upgrade to Azure Cosmos DB Java SDK |-------------------------|--------------|----------------------|-----------------------------------------|-------------------|-----------------------------------------------------------|------------------------------------------------------------------------------------------| | Async 2.x.x | June 2018 | Async(RxJava) | com.microsoft.azure::azure-cosmosdb | | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-async-java) | | "Legacy" Sync 2.x.x | Sept 2018 | Sync | com.microsoft.azure::azure-documentdb | | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-java) | -| 3.x.x | July 2019 | Async(Reactor)/Sync | com.microsoft.azure::azure-cosmos | | [API](https://azure.github.io/azure-cosmosdb-java/3.0.0/) | - | -| 4.0 | April 2020 | Async(Reactor)/Sync | com.azure::azure-cosmos | | - | - | +| 3.x.x | July 2019 | Async(Reactor)/Sync | com.microsoft.azure::azure-cosmos | com.azure.data.cosmos | 
[API](https://azure.github.io/azure-cosmosdb-java/3.0.0/) | - | +| 4.0 | April 2020 | Async(Reactor)/Sync | com.azure::azure-cosmos | com.azure.cosmos | - | - | ## Important implementation changes @@ -36,10 +36,24 @@ Users of the Async Java SDK 2.x.x will want to review our [Reactor vs RxJava Gui * **Java SDK 3.x.x**: classes belong to the Async API unless the name has ```Sync``` after Cosmos. * **Async Java SDK 2.x.x**: similar class names to **Sync Java SDK 2.x.x** but the class name starts with ```Async```. -### Representing items +### Hierarchical API + +Java SDK 4.0 and Java SDK 3.x.x introduce a hierarchical API which organizes clients, databases and containers in a nested fashion, as shown in this Java SDK 4.0 code snippet: + +```java +CosmosContainer = client.getDatabase("MyDatabaseName").getContainer("MyContainerName"); +``` + +In version 2.x.x Java SDKs, all operations on resources and documents are performed through the client instance. + +### Representing documents + +In Java SDK 4.0, custom POJO's and ```JsonNodes``` are the two options for writing and reading documents from Azure Cosmos DB. ### Imports + + ### Accessors ### QueryMetrics From 286d4c52001b7cbfeff3ddb886f80a57dfe71344 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 30 Mar 2020 00:15:57 -0700 Subject: [PATCH 088/110] Java SDK 4.0 content for Migration Guide --- migration-guide.md | 297 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 295 insertions(+), 2 deletions(-) diff --git a/migration-guide.md b/migration-guide.md index 1d828a7..9a0cd6d 100644 --- a/migration-guide.md +++ b/migration-guide.md @@ -23,6 +23,10 @@ If you have been using a pre-3.x.x Java SDK, it is recommended to review our [Re Users of the Async Java SDK 2.x.x will want to review our [Reactor vs RxJava Guide]() for additional guidance on converting RxJava code to use Reactor. 
+### Java SDK 4.0 implements **Direct Mode** in Async and Sync APIs + +If you are user of the "Legacy" Sync Java SDK 2.x.x note that a **Direct** **ConnectionMode** based on TCP is implemented in Java SDK 4.0 for both the Async and Sync APIs. + ## Important API changes ### Naming conventions @@ -52,21 +56,310 @@ In Java SDK 4.0, custom POJO's and ```JsonNodes``` are the two options for writi ### Imports +* Java SDK 4.0 packages begin with ```com.azure.cosmos``` +* Java SDK 3.x.x packages begin with ```com.azure.data.cosmos``` ### Accessors -### QueryMetrics +Java SDK 4.0 exposes ```get``` and ```set``` methods for accessing instance members. +* Example: a ```CosmosContainer``` instance has ```container.getId()``` and ```container.setId()``` methods. +This is different from Java SDK 3.x.x which exposes a fluent interface. +* Example: a ```CosmosSyncContainer``` instance has ```container.id()``` which is overloaded to get or set ```id```. ## Code snippet comparisons ### Create resources +**Java SDK 4.0 Async API:** + +```java +ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); +// Setting the preferred location to Cosmos DB Account region +// West US is just an example. User should set preferred location to the Cosmos DB region closest to the application +defaultPolicy.setPreferredLocations(Lists.newArrayList("Your Account Location")); +// Use Direct Mode for best performance +defaultPolicy.setConnectionMode(ConnectionMode.DIRECT); + +// Create Async client. +// Building an async client is still a sync operation. +client = new CosmosClientBuilder() + .setEndpoint("your.hostname") + .setKey("yourmasterkey") + .setConnectionPolicy(ConnectionPolicy.getDefaultPolicy()) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildAsyncClient(); + +// Describe the logic of database and container creation using Reactor... 
+Mono databaseContainerIfNotExist = + // Create database with specified name + client.createDatabaseIfNotExists("YourDatabaseName") + .flatMap(databaseResponse -> { + database = databaseResponse.getDatabase(); + // Container properties - name and partition key + CosmosContainerProperties containerProperties = + new CosmosContainerProperties("YourContainerName", "/id"); + // Create container with specified properties & provisioned throughput + return database.createContainerIfNotExists(containerProperties, 400); +}).flatMap(containerResponse -> { + container = containerResponse.getContainer(); + return Mono.empty(); +}).subscribe(); +``` + +**Java SDK 3.x.x Async API:** + +```java + +``` + ### Item operations +**Java SDK 4.0 Async API:** + +```java +// Container is created. Generate many docs to insert. +int number_of_docs = 50000; +ArrayList docs = generateManyDocs(number_of_docs); + +// Insert many docs into container... +Flux.fromIterable(docs).flatMap(doc -> container.createItem(doc)) + // ^Publisher: upon subscription, createItem inserts a doc & + // publishes request response to the next operation... + .flatMap(itemResponse -> { + // ...Streaming operation: count each doc... + number_docs_inserted.getAndIncrement(); + return Mono.empty(); +}).subscribe(); // ...Subscribing or blocking triggers stream execution. 
+``` + +**Java SDK 3.x.x Async API:** + +```java + +``` + ### Indexing +**Java SDK 4.0 Async API:** + +```java +CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerName, "/lastName"); + +// Custom indexing policy +IndexingPolicy indexingPolicy = new IndexingPolicy(); +indexingPolicy.setIndexingMode(IndexingMode.CONSISTENT); + +// Included paths +List includedPaths = new ArrayList<>(); +IncludedPath includedPath = new IncludedPath(); +includedPath.setPath("/*"); +includedPaths.add(includedPath); +indexingPolicy.setIncludedPaths(includedPaths); + +// Excluded paths +List excludedPaths = new ArrayList<>(); +ExcludedPath excludedPath = new ExcludedPath(); +excludedPath.setPath("/name/*"); +excludedPaths.add(excludedPath); +indexingPolicy.setExcludedPaths(excludedPaths); + +containerProperties.setIndexingPolicy(indexingPolicy); + +CosmosAsyncContainer containerIfNotExists = database.createContainerIfNotExists(containerProperties, 400) + .block() + .getContainer(); +``` + +**Java SDK 3.x.x Async API:** + +```java + +``` + ### Stored procedures -### Change Feed \ No newline at end of file +**Java SDK 4.0 Async API:** + +```java +logger.info("Creating stored procedure...\n"); + +sprocId = "createMyDocument"; +String sprocBody = "function createMyDocument() {\n" + + "var documentToCreate = {\"id\":\"test_doc\"}\n" + + "var context = getContext();\n" + + "var collection = context.getCollection();\n" + + "var accepted = collection.createDocument(collection.getSelfLink(), documentToCreate,\n" + + " function (err, documentCreated) {\n" + + "if (err) throw new Error('Error' + err.message);\n" + + "context.getResponse().setBody(documentCreated.id)\n" + + "});\n" + + "if (!accepted) return;\n" + + "}"; +CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId, sprocBody); +container.getScripts() + .createStoredProcedure(storedProcedureDef, + new CosmosStoredProcedureRequestOptions()).block(); + +// ... 
+ +logger.info(String.format("Executing stored procedure %s...\n\n", sprocId)); + +CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions(); +options.setPartitionKey(new PartitionKey("test_doc")); + +container.getScripts() + .getStoredProcedure(sprocId) + .execute(null, options) + .flatMap(executeResponse -> { + logger.info(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", + sprocId, + executeResponse.getResponseAsString(), + executeResponse.getStatusCode(), + executeResponse.getRequestCharge())); + return Mono.empty(); + }).block(); +``` + +**Java SDK 3.x.x Async API:** + +```java + +``` + +### Change Feed + +**Java SDK 4.0 Async API:** + +```java +ChangeFeedProcessor.changeFeedProcessorBuilder() + .setHostName(hostName) + .setFeedContainer(feedContainer) + .setLeaseContainer(leaseContainer) + .setHandleChanges((List docs) -> { + logger.info("--->setHandleChanges() START"); + + for (JsonNode document : docs) { + try { + //Change Feed hands the document to you in the form of a JsonNode + //As a developer you have two options for handling the JsonNode document provided to you by Change Feed + //One option is to operate on the document in the form of a JsonNode, as shown below. This is great + //especially if you do not have a single uniform data model for all documents. + logger.info("---->DOCUMENT RECEIVED: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() + .writeValueAsString(document)); + + //You can also transform the JsonNode to a POJO having the same structure as the JsonNode, + //as shown below. Then you can operate on the POJO. 
+ CustomPOJO pojo_doc = OBJECT_MAPPER.treeToValue(document, CustomPOJO.class); + logger.info("----=>id: " + pojo_doc.getId()); + + } catch (JsonProcessingException e) { + e.printStackTrace(); + } + } + logger.info("--->handleChanges() END"); + + }) + .build(); +``` + +**Java SDK 3.x.x Async API:** + +```java + +``` + +### Container TTL + +**Java SDK 4.0 Async API:** + +```java +CosmosAsyncContainer container; + +// Create a new container with TTL enabled with default expiration value +CosmosContainerProperties containerProperties = new CosmosContainerProperties("myContainer", "/myPartitionKey"); +containerProperties.setDefaultTimeToLiveInSeconds(90 * 60 * 60 * 24); +container = database.createContainerIfNotExists(containerProperties, 400).block().getContainer(); +``` + +**Java SDK 3.x.x Async API:** + +```java +CosmosContainer container; + +// Create a new container with TTL enabled with default expiration value +CosmosContainerProperties containerProperties = new CosmosContainerProperties("myContainer", "/myPartitionKey"); +containerProperties.defaultTimeToLive(90 * 60 * 60 * 24); +container = database.createContainerIfNotExists(containerProperties, 400).block().container(); +``` + +### Document TTL + +**Java SDK 4.0 Async API:** + +```java +// Include a property that serializes to "ttl" in JSON +public class SalesOrder +{ + private String id; + private String customerId; + private Integer ttl; + + public SalesOrder(String id, String customerId, Integer ttl) { + this.id = id; + this.customerId = customerId; + this.ttl = ttl; + } + + public String getId() {return this.id;} + public void setId(String new_id) {this.id = new_id;} + public String getCustomerId() {return this.customerId;} + public void setCustomerId(String new_cid) {this.customerId = new_cid;} + public Integer getTtl() {return this.ttl;} + public void setTtl(Integer new_ttl) {this.ttl = new_ttl;} + + //... 
+} + +// Set the value to the expiration in seconds +SalesOrder salesOrder = new SalesOrder( + "SO05", + "CO18009186470", + 60 * 60 * 24 * 30 // Expire sales orders in 30 days +); +``` + +**Java SDK 3.x.x Async API:** + +```java +// Include a property that serializes to "ttl" in JSON +public class SalesOrder +{ + private String id; + private String customerId; + private Integer ttl; + + public SalesOrder(String id, String customerId, Integer ttl) { + this.id = id; + this.customerId = customerId; + this.ttl = ttl; + } + + public String id() {return this.id;} + public void id(String new_id) {this.id = new_id;} + public String customerId() {return this.customerId;} + public void customerId(String new_cid) {this.customerId = new_cid;} + public Integer ttl() {return this.ttl;} + public void ttl(Integer new_ttl) {this.ttl = new_ttl;} + + //... +} + +// Set the value to the expiration in seconds +SalesOrder salesOrder = new SalesOrder( + "SO05", + "CO18009186470", + 60 * 60 * 24 * 30 // Expire sales orders in 30 days +); +``` From 721a31424e783d0dc628748be90a235802279153 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 30 Mar 2020 00:58:07 -0700 Subject: [PATCH 089/110] Migration guide ready for merge to master --- migration-guide.md | 240 +++++++++++++++++++++++++++++++++++---------- 1 file changed, 188 insertions(+), 52 deletions(-) diff --git a/migration-guide.md b/migration-guide.md index 9a0cd6d..44899f0 100644 --- a/migration-guide.md +++ b/migration-guide.md @@ -54,11 +54,24 @@ In version 2.x.x Java SDKs, all operations on resources and documents are perfor In Java SDK 4.0, custom POJO's and ```JsonNodes``` are the two options for writing and reading documents from Azure Cosmos DB. +In Java SDK 3.x.x ```CosmosItemProperties``` was exposed by the public API and served as a document representation. This class is no longer exposed in Java SDK 4.0. 
+ ### Imports * Java SDK 4.0 packages begin with ```com.azure.cosmos``` - -* Java SDK 3.x.x packages begin with ```com.azure.data.cosmos``` + * Java SDK 3.x.x packages begin with ```com.azure.data.cosmos``` + +* Java SDK 4.0 places a number of classes in a nested package, ```com.azure.cosmos.models```. This includes + * ```CosmosContainerResponse``` + * ```CosmosDatabaseResponse``` + * ```CosmosItemResponse``` + * And Async API analogs of all of the above... + * ```CosmosContainerProperties``` + * ```FeedOptions``` + * ```PartitionKey``` + * ```IndexingPolicy``` + * ```IndexingMode``` + * ...etc. ### Accessors @@ -77,7 +90,6 @@ This is different from Java SDK 3.x.x which exposes a fluent interface. ```java ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); // Setting the preferred location to Cosmos DB Account region -// West US is just an example. User should set preferred location to the Cosmos DB region closest to the application defaultPolicy.setPreferredLocations(Lists.newArrayList("Your Account Location")); // Use Direct Mode for best performance defaultPolicy.setConnectionMode(ConnectionMode.DIRECT); @@ -91,27 +103,51 @@ client = new CosmosClientBuilder() .setConsistencyLevel(ConsistencyLevel.EVENTUAL) .buildAsyncClient(); -// Describe the logic of database and container creation using Reactor... 
-Mono databaseContainerIfNotExist = - // Create database with specified name - client.createDatabaseIfNotExists("YourDatabaseName") - .flatMap(databaseResponse -> { - database = databaseResponse.getDatabase(); - // Container properties - name and partition key - CosmosContainerProperties containerProperties = - new CosmosContainerProperties("YourContainerName", "/id"); - // Create container with specified properties & provisioned throughput - return database.createContainerIfNotExists(containerProperties, 400); -}).flatMap(containerResponse -> { - container = containerResponse.getContainer(); - return Mono.empty(); + +// Create database with specified name +client.createDatabaseIfNotExists("YourDatabaseName") + .flatMap(databaseResponse -> { + database = databaseResponse.getDatabase(); + // Container properties - name and partition key + CosmosContainerProperties containerProperties = + new CosmosContainerProperties("YourContainerName", "/id"); + // Create container with specified properties & provisioned throughput + return database.createContainerIfNotExists(containerProperties, 400); + }).flatMap(containerResponse -> { + container = containerResponse.getContainer(); + return Mono.empty(); }).subscribe(); ``` **Java SDK 3.x.x Async API:** ```java +ConnectionPolicy defaultPolicy = ConnectionPolicy.defaultPolicy(); +// Setting the preferred location to Cosmos DB Account region +defaultPolicy.preferredLocations(Lists.newArrayList("Your Account Location")); +// Create async client +// +client = new CosmosClientBuilder() + .endpoint("your.hostname") + .key("yourmasterkey") + .connectionPolicy(defaultPolicy) + .consistencyLevel(ConsistencyLevel.EVENTUAL) + .build(); + +// Create database with specified name +client.createDatabaseIfNotExists("YourDatabaseName") + .flatMap(databaseResponse -> { + database = databaseResponse.database(); + // Container properties - name and partition key + CosmosContainerProperties containerProperties = + new 
CosmosContainerProperties("YourContainerName", "/id"); + // Create container with specified properties & provisioned throughput + return database.createContainerIfNotExists(containerProperties, 400); + }).flatMap(containerResponse -> { + container = containerResponse.container(); + return Mono.empty(); +}).subscribe(); ``` ### Item operations @@ -124,21 +160,25 @@ int number_of_docs = 50000; ArrayList docs = generateManyDocs(number_of_docs); // Insert many docs into container... -Flux.fromIterable(docs).flatMap(doc -> container.createItem(doc)) - // ^Publisher: upon subscription, createItem inserts a doc & - // publishes request response to the next operation... - .flatMap(itemResponse -> { - // ...Streaming operation: count each doc... - number_docs_inserted.getAndIncrement(); - return Mono.empty(); -}).subscribe(); // ...Subscribing or blocking triggers stream execution. +Flux.fromIterable(docs) + .flatMap(doc -> container.createItem(doc)) + .subscribe(); // ...Subscribing triggers stream execution. ``` **Java SDK 3.x.x Async API:** ```java +// Container is created. Generate many docs to insert. +int number_of_docs = 50000; +ArrayList docs = generateManyDocs(number_of_docs); +// Insert many docs into container... +Flux.fromIterable(docs) + .flatMap(doc -> container.createItem(doc)) + .subscribe(); // ...Subscribing triggers stream execution. 
``` +(the same) + ### Indexing @@ -175,7 +215,31 @@ CosmosAsyncContainer containerIfNotExists = database.createContainerIfNotExists( **Java SDK 3.x.x Async API:** ```java +CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerName, "/lastName"); + +// Custom indexing policy +IndexingPolicy indexingPolicy = new IndexingPolicy(); +indexingPolicy.setIndexingMode(IndexingMode.CONSISTENT); //To turn indexing off set IndexingMode.NONE +// Included paths +List includedPaths = new ArrayList<>(); +IncludedPath includedPath = new IncludedPath(); +includedPath.path("/*"); +includedPaths.add(includedPath); +indexingPolicy.setIncludedPaths(includedPaths); + +// Excluded paths +List excludedPaths = new ArrayList<>(); +ExcludedPath excludedPath = new ExcludedPath(); +excludedPath.path("/name/*"); +excludedPaths.add(excludedPath); +indexingPolicy.excludedPaths(excludedPaths); + +containerProperties.indexingPolicy(indexingPolicy); + +CosmosContainer containerIfNotExists = database.createContainerIfNotExists(containerProperties, 400) + .block() + .container(); ``` ### Stored procedures @@ -225,7 +289,43 @@ container.getScripts() **Java SDK 3.x.x Async API:** ```java +logger.info("Creating stored procedure...\n"); + +sprocId = "createMyDocument"; +String sprocBody = "function createMyDocument() {\n" + + "var documentToCreate = {\"id\":\"test_doc\"}\n" + + "var context = getContext();\n" + + "var collection = context.getCollection();\n" + + "var accepted = collection.createDocument(collection.getSelfLink(), documentToCreate,\n" + + " function (err, documentCreated) {\n" + + "if (err) throw new Error('Error' + err.message);\n" + + "context.getResponse().setBody(documentCreated.id)\n" + + "});\n" + + "if (!accepted) return;\n" + + "}"; +CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId, sprocBody); +container.getScripts() + .createStoredProcedure(storedProcedureDef, + new 
CosmosStoredProcedureRequestOptions()).block(); + +// ... +logger.info(String.format("Executing stored procedure %s...\n\n", sprocId)); + +CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions(); +options.partitionKey(new PartitionKey("test_doc")); + +container.getScripts() + .getStoredProcedure(sprocId) + .execute(null, options) + .flatMap(executeResponse -> { + logger.info(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", + sprocId, + executeResponse.responseAsString(), + executeResponse.statusCode(), + executeResponse.requestCharge())); + return Mono.empty(); + }).block(); ``` ### Change Feed @@ -233,28 +333,64 @@ container.getScripts() **Java SDK 4.0 Async API:** ```java -ChangeFeedProcessor.changeFeedProcessorBuilder() - .setHostName(hostName) - .setFeedContainer(feedContainer) - .setLeaseContainer(leaseContainer) - .setHandleChanges((List docs) -> { +ChangeFeedProcessor changeFeedProcessorInstance = + ChangeFeedProcessor.changeFeedProcessorBuilder() + .setHostName(hostName) + .setFeedContainer(feedContainer) + .setLeaseContainer(leaseContainer) + .setHandleChanges((List docs) -> { + logger.info("--->setHandleChanges() START"); + + for (JsonNode document : docs) { + try { + //Change Feed hands the document to you in the form of a JsonNode + //As a developer you have two options for handling the JsonNode document provided to you by Change Feed + //One option is to operate on the document in the form of a JsonNode, as shown below. This is great + //especially if you do not have a single uniform data model for all documents. + logger.info("---->DOCUMENT RECEIVED: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() + .writeValueAsString(document)); + + //You can also transform the JsonNode to a POJO having the same structure as the JsonNode, + //as shown below. Then you can operate on the POJO. 
+ CustomPOJO pojo_doc = OBJECT_MAPPER.treeToValue(document, CustomPOJO.class); + logger.info("----=>id: " + pojo_doc.getId()); + + } catch (JsonProcessingException e) { + e.printStackTrace(); + } + } + logger.info("--->handleChanges() END"); + + }) + .build(); + +// ... + + changeFeedProcessorInstance.start() + .subscribeOn(Schedulers.elastic()) + .subscribe(); +``` + +**Java SDK 3.x.x Async API:** + +```java +ChangeFeedProcessor changeFeedProcessorInstance = + ChangeFeedProcessor.Builder() + .hostName(hostName) + .feedContainer(feedContainer) + .leaseContainer(leaseContainer) + .handleChanges((List docs) -> { logger.info("--->setHandleChanges() START"); - for (JsonNode document : docs) { + for (CosmosItemProperties document : docs) { try { - //Change Feed hands the document to you in the form of a JsonNode - //As a developer you have two options for handling the JsonNode document provided to you by Change Feed - //One option is to operate on the document in the form of a JsonNode, as shown below. This is great - //especially if you do not have a single uniform data model for all documents. - logger.info("---->DOCUMENT RECEIVED: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() - .writeValueAsString(document)); - - //You can also transform the JsonNode to a POJO having the same structure as the JsonNode, - //as shown below. Then you can operate on the POJO. - CustomPOJO pojo_doc = OBJECT_MAPPER.treeToValue(document, CustomPOJO.class); - logger.info("----=>id: " + pojo_doc.getId()); - - } catch (JsonProcessingException e) { + + // You are given the document as a CosmosItemProperties instance which you may + // cast to the desired type. + CustomPOJO pojo_doc = document.getObject(CustomPOJO.class); + logger.info("----=>id: " + pojo_doc.id()); + + } catch (Exception e) { e.printStackTrace(); } } @@ -262,12 +398,12 @@ ChangeFeedProcessor.changeFeedProcessorBuilder() }) .build(); -``` -**Java SDK 3.x.x Async API:** - -```java +// ... 
+ changeFeedProcessorInstance.start() + .subscribeOn(Schedulers.elastic()) + .subscribe(); ``` ### Container TTL @@ -347,11 +483,11 @@ public class SalesOrder } public String id() {return this.id;} - public void id(String new_id) {this.id = new_id;} - public String customerId() {return this.customerId;} - public void customerId(String new_cid) {this.customerId = new_cid;} + public SalesOrder id(String new_id) {this.id = new_id; return this;} + public String customerId() {return this.customerId; return this;} + public SalesOrder customerId(String new_cid) {this.customerId = new_cid;} public Integer ttl() {return this.ttl;} - public void ttl(Integer new_ttl) {this.ttl = new_ttl;} + public SalesOrder ttl(Integer new_ttl) {this.ttl = new_ttl; return this;} //... } From cc1fd08d12609d2305102f42cb4ab4adec018bbe Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 30 Mar 2020 00:59:45 -0700 Subject: [PATCH 090/110] Title capitalization --- reactor-pattern-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index a9a7e02..acaa89d 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -1,4 +1,4 @@ -# Reactor Pattern Guide +# Reactor pattern guide The purpose of this guide is to help you get started using Reactor-based Java SDKs by understanding basic design patterns for the Reactor framework.The [Project Reactor](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) website has further documentation if you want to learn more. 
From c32c8ec87afa1235b4cdbaecb071379164aafa60 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 30 Mar 2020 01:00:42 -0700 Subject: [PATCH 091/110] Update migration-guide.md --- migration-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/migration-guide.md b/migration-guide.md index 44899f0..b051eb2 100644 --- a/migration-guide.md +++ b/migration-guide.md @@ -25,7 +25,7 @@ Users of the Async Java SDK 2.x.x will want to review our [Reactor vs RxJava Gui ### Java SDK 4.0 implements **Direct Mode** in Async and Sync APIs -If you are user of the "Legacy" Sync Java SDK 2.x.x note that a **Direct** **ConnectionMode** based on TCP is implemented in Java SDK 4.0 for both the Async and Sync APIs. +If you are user of the "Legacy" Sync Java SDK 2.x.x note that a **Direct** **ConnectionMode** based on TCP (as opposed to HTTP) is implemented in Java SDK 4.0 for both the Async and Sync APIs. ## Important API changes From e40426d67c78e78092fe71c9781d1c10f4151be3 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 30 Mar 2020 01:04:02 -0700 Subject: [PATCH 092/110] Fixed image not appearing --- migration-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/migration-guide.md b/migration-guide.md index b051eb2..6a96489 100644 --- a/migration-guide.md +++ b/migration-guide.md @@ -31,7 +31,7 @@ If you are user of the "Legacy" Sync Java SDK 2.x.x note that a **Direct** **Con ### Naming conventions -![Java SDK naming conventions](media/java_sdk_naming_conventions.jpg) +![Java SDK naming conventions](media/java_sdk_naming_conventions.JPG) * Java SDK 3.x.x and 4.0 refer to clients, resources, etc. as ```Cosmos```*X*; for example ```CosmosClient```, ```CosmosDatabase```, ```CosmosContainer```..., whereas version 2.x.x Java SDKs did not have a uniform naming scheme. 
From ababdf2b425a43ec76d39b185cee4987a2ece8fb Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 30 Mar 2020 01:09:16 -0700 Subject: [PATCH 093/110] Added Java package names for v2.x.x SDKs --- migration-guide.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/migration-guide.md b/migration-guide.md index 6a96489..fa92fc2 100644 --- a/migration-guide.md +++ b/migration-guide.md @@ -8,12 +8,12 @@ The purpose of this guide is to help easily upgrade to Azure Cosmos DB Java SDK ## Background -| Java SDK | Release Date | Bundled APIs | Maven Jar | Java package name |API Reference | Release Notes | -|-------------------------|--------------|----------------------|-----------------------------------------|-------------------|-----------------------------------------------------------|------------------------------------------------------------------------------------------| -| Async 2.x.x | June 2018 | Async(RxJava) | com.microsoft.azure::azure-cosmosdb | | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-async-java) | -| "Legacy" Sync 2.x.x | Sept 2018 | Sync | com.microsoft.azure::azure-documentdb | | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-java) | -| 3.x.x | July 2019 | Async(Reactor)/Sync | com.microsoft.azure::azure-cosmos | com.azure.data.cosmos | [API](https://azure.github.io/azure-cosmosdb-java/3.0.0/) | - | -| 4.0 | April 2020 | Async(Reactor)/Sync | com.azure::azure-cosmos | com.azure.cosmos | - | - | +| Java SDK | Release Date | Bundled APIs | Maven Jar | Java package name |API Reference | Release Notes | 
+|-------------------------|--------------|----------------------|-----------------------------------------|----------------------------------|-----------------------------------------------------------|------------------------------------------------------------------------------------------| +| Async 2.x.x | June 2018 | Async(RxJava) | com.microsoft.azure::azure-cosmosdb | com.microsoft.azure.cosmosdb.rx | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-async-java) | +| "Legacy" Sync 2.x.x | Sept 2018 | Sync | com.microsoft.azure::azure-documentdb | com.microsoft.azure.cosmosdb | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-java) | +| 3.x.x | July 2019 | Async(Reactor)/Sync | com.microsoft.azure::azure-cosmos | com.azure.data.cosmos | [API](https://azure.github.io/azure-cosmosdb-java/3.0.0/) | - | +| 4.0 | April 2020 | Async(Reactor)/Sync | com.azure::azure-cosmos | com.azure.cosmos | - | - | ## Important implementation changes From 089309937cbb397f8f363c09c69e54d058979326 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 30 Mar 2020 02:09:13 -0700 Subject: [PATCH 094/110] Minor changes --- migration-guide.md | 43 +++++++++++++++++++++---------------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/migration-guide.md b/migration-guide.md index fa92fc2..f3f1622 100644 --- a/migration-guide.md +++ b/migration-guide.md @@ -44,7 +44,7 @@ If you are user of the "Legacy" Sync Java SDK 2.x.x note that a **Direct** **Con Java SDK 4.0 and Java SDK 3.x.x introduce a hierarchical API which organizes clients, databases and containers in a nested fashion, as shown in this Java SDK 4.0 code snippet: -```java +```java"""" CosmosContainer = client.getDatabase("MyDatabaseName").getContainer("MyContainerName"); ``` @@ -54,15 +54,15 @@ In version 2.x.x Java SDKs, all 
operations on resources and documents are perfor In Java SDK 4.0, custom POJO's and ```JsonNodes``` are the two options for writing and reading documents from Azure Cosmos DB. -In Java SDK 3.x.x ```CosmosItemProperties``` was exposed by the public API and served as a document representation. This class is no longer exposed in Java SDK 4.0. - +In Java SDK 3.x.x ```CosmosItemProperties"`` 'as exposed"by the public API and served as a document representation. This class is no longer exposed in Java SDK 4.0. +"""" ### Imports * Java SDK 4.0 packages begin with ```com.azure.cosmos``` * Java SDK 3.x.x packages begin with ```com.azure.data.cosmos``` * Java SDK 4.0 places a number of classes in a nested package, ```com.azure.cosmos.models```. This includes - * ```CosmosContainerResponse``` + * ```CosmosContainerResponse```' * ```CosmosDatabaseResponse``` * ```CosmosItemResponse``` * And Async API analogs of all of the above... @@ -71,15 +71,15 @@ In Java SDK 3.x.x ```CosmosItemProperties``` was exposed by the public API and s * ```PartitionKey``` * ```IndexingPolicy``` * ```IndexingMode``` - * ...etc. - -### Accessors + * ...etc.""'""'""'""' -Java SDK 4.0 exposes ```get``` and ```set``` methods for accessing instance members. +### Accessors""'""'""' + +Java SDK 4.0 exposes ```get``` and ```set``` methods for accessing in"ta"ce members. * Example: a ```CosmosContainer``` instance has ```container.getId()``` and ```container.setId()``` methods. -This is different from Java SDK 3.x.x which exposes a fluent interface. -* Example: a ```CosmosSyncContainer``` instance has ```container.id()``` which is overloaded to get or set ```id```. +This is different from Java SDK 3.x.x which exposes a f"ue"t interface. + Example: a ```CosmosSyncContainer``` instance has ```container.id()``` which is overloaded to get or set ```id```. 
## Code snippet comparisons @@ -94,7 +94,7 @@ defaultPolicy.setPreferredLocations(Lists.newArrayList("Your Account Location")) // Use Direct Mode for best performance defaultPolicy.setConnectionMode(ConnectionMode.DIRECT); -// Create Async client. +// Create Async client.""""""''""""""'' // Building an async client is still a sync operation. client = new CosmosClientBuilder() .setEndpoint("your.hostname") @@ -106,11 +106,11 @@ client = new CosmosClientBuilder() // Create database with specified name client.createDatabaseIfNotExists("YourDatabaseName") - .flatMap(databaseResponse -> { + .flatMap(databas'Response -> { database = databaseResponse.getDatabase(); // Container properties - name and partition key CosmosContainerProperties containerProperties = - new CosmosContainerProperties("YourContainerName", "/id"); + new CosmosContaine'Properties("YourContainerName", "/id"); // Create container with specified properties & provisioned throughput return database.createContainerIfNotExists(containerProperties, 400); }).flatMap(containerResponse -> { @@ -120,9 +120,9 @@ client.createDatabaseIfNotExists("YourDatabaseName") ``` **Java SDK 3.x.x Async API:** - -```java -ConnectionPolicy defaultPolicy = ConnectionPolicy.defaultPolicy(); +"""""""""""" +```java""'""' +ConnectionPolicy defaultPolicy = ConnectionPolicy.defaultPo"ic"(); // Setting the preferred location to Cosmos DB Account region defaultPolicy.preferredLocations(Lists.newArrayList("Your Account Location")); @@ -133,7 +133,7 @@ client = new CosmosClientBuilder() .key("yourmasterkey") .connectionPolicy(defaultPolicy) .consistencyLevel(ConsistencyLevel.EVENTUAL) - .build(); + .build();"""" // Create database with specified name client.createDatabaseIfNotExists("YourDatabaseName") @@ -143,11 +143,11 @@ client.createDatabaseIfNotExists("YourDatabaseName") CosmosContainerProperties containerProperties = new CosmosContainerProperties("YourContainerName", "/id"); // Create container with specified properties & 
provisioned throughput - return database.createContainerIfNotExists(containerProperties, 400); + return database"createContainerIf"otExists(containerProperties, 400); }).flatMap(containerResponse -> { container = containerResponse.container(); return Mono.empty(); -}).subscribe(); +}).'ubscribe(); ``` ### Item operations @@ -160,8 +160,8 @@ int number_of_docs = 50000; ArrayList docs = generateManyDocs(number_of_docs); // Insert many docs into container... -Flux.fromIterable(docs) - .flatMap(doc -> container.createItem(doc)) +Flux.fromIterable(docs)"""" + .flatMap(doc -> container.createItem(doc))"""" .subscribe(); // ...Subscribing triggers stream execution. ``` @@ -177,7 +177,6 @@ Flux.fromIterable(docs) .flatMap(doc -> container.createItem(doc)) .subscribe(); // ...Subscribing triggers stream execution. ``` -(the same) ### Indexing From c60122d854f05b2ccdad07612ecde9a024cc61b9 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 30 Mar 2020 03:47:37 -0700 Subject: [PATCH 095/110] First run at Reactor RxJava guide --- migration-guide.md | 8 +-- reactor-pattern-guide.md | 44 +++++++-------- reactor-rxjava-guide.md | 119 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 145 insertions(+), 26 deletions(-) create mode 100644 reactor-rxjava-guide.md diff --git a/migration-guide.md b/migration-guide.md index f3f1622..39e0143 100644 --- a/migration-guide.md +++ b/migration-guide.md @@ -21,7 +21,7 @@ The purpose of this guide is to help easily upgrade to Azure Cosmos DB Java SDK If you have been using a pre-3.x.x Java SDK, it is recommended to review our [Reactor pattern guide](reactor-pattern-guide.md) for an introduction to async programming and Reactor. -Users of the Async Java SDK 2.x.x will want to review our [Reactor vs RxJava Guide]() for additional guidance on converting RxJava code to use Reactor. 
+Users of the Async Java SDK 2.x.x will want to review our [Reactor vs RxJava Guide](reactor-rxjava-guide.md) for additional guidance on converting RxJava code to use Reactor. ### Java SDK 4.0 implements **Direct Mode** in Async and Sync APIs @@ -44,7 +44,7 @@ If you are user of the "Legacy" Sync Java SDK 2.x.x note that a **Direct** **Con Java SDK 4.0 and Java SDK 3.x.x introduce a hierarchical API which organizes clients, databases and containers in a nested fashion, as shown in this Java SDK 4.0 code snippet: -```java"""" +```java CosmosContainer = client.getDatabase("MyDatabaseName").getContainer("MyContainerName"); ``` @@ -71,9 +71,9 @@ In Java SDK 3.x.x ```CosmosItemProperties"`` 'as exposed"by the public API and s * ```PartitionKey``` * ```IndexingPolicy``` * ```IndexingMode``` - * ...etc.""'""'""'""' + * ...etc. -### Accessors""'""'""' +### Accessors Java SDK 4.0 exposes ```get``` and ```set``` methods for accessing in"ta"ce members. * Example: a ```CosmosContainer``` instance has ```container.getId()``` and ```container.setId()``` methods. diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md index acaa89d..356c1fd 100644 --- a/reactor-pattern-guide.md +++ b/reactor-pattern-guide.md @@ -26,7 +26,7 @@ How this differs from imperative programming, is that the coder is describing th ### 2. Reactive Streams Frameworks for Java/JVM -Reactive Streams frameworks implement the Reactive Streams Standard for specific programming languages. [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](http://reactivex.io/) for JVM) was the basis of past Azure Java SDKs, but will not be going forward. +A Reactive Streams framework implements the Reactive Streams Standard for specific programming languages. The [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](http://reactivex.io/) for JVM) framework was the basis of past Azure Java SDKs, but will not be going forward. 
[Project Reactor](https://projectreactor.io/) or just *Reactor* is the Reactive Programming framework being used for new Azure Java SDKs. The purpose of the rest of this document is to help you get started with Reactor. @@ -44,7 +44,7 @@ To write a program using Reactor, you will need to describe one or more async op Reactor follows a "hybrid push-pull model": the ```Publisher``` pushes events and data into the pipeline as they are available, but ***only*** once you request events and data from the ```Publisher``` by **subscribing**. -To put this in context, consider a "normal" non-Reactor program you might write that takes takes a dependency on some other code with unpredictable response time. For example, maybe you write a function to perform a calculation, and one input comes from calling a function that requests data over HTTP. You might deal with this by implementing a control flow which first calls the dependency code, waits for it to return output, and then provides that output to your code as input. So your code is “pulling” output from its dependency on an on-demand basis. This can be inefficient if there is latency in the dependency (as is the case for the aforementioned HTTP request example); your code has to loop waiting for the dependency. +To put this in context, consider a "normal" non-Reactor program you might write that takes takes a dependency on some other code with unpredictable response time. For example, maybe you write a function to perform a calculation, and one input comes from calling a function that requests data over HTTP. You might deal with this by implementing a control flow which first calls the dependency code, waits for it to return output, and then provides that output to your code as input. So your code is "pulling" output from its dependency on an on-demand basis. 
This can be inefficient if there is latency in the dependency (as is the case for the aforementioned HTTP request example); your code has to loop waiting for the dependency. In a "push" model the dependency signals your code to consume the HTTP request response on an "on-availability" basis; the rest of the time, your code lies dormant, freeing up CPU cycles. This is an event-driven and async approach. But in order for the dependency to signal your code, ***the dependency has to know that your code depends on it*** – and that is the purpose of defining async operation pipelines in Reactor; each pipeline stage is really a piece of async code servicing events and data from the previous stage on an on-availability basis. By defining the pipeline, you tell each stage where to forward events and data to. @@ -54,15 +54,15 @@ Now I will illustrate this with Reactor code examples. Consider a Reminders app. ```java Flux reminderPipeline = ReminderAsyncService.getRemindersPublisher() // Pipeline Stage 1 - .flatMap(reminder -> “Don’t forget: ” + reminder) // Stage 2 - .flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn); // Stage 3 + .flatMap(reminder -> "Don't forget: " + reminder) // Stage 2 + .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); // Stage 3 ``` **Subscribe phase (execute pipeline on incoming events)** ```java reminderPipeline.subscribe(System.out::println); // Async – returns immediately, pipeline executes in the background -while (true) doOtherThings(); // We’re freed up to do other tasks 😊 +while (true) doOtherThings(); // We're freed up to do other tasks 😊 ``` The ```Flux``` class internally represents an async operation pipeline as a DAG and provides instance methods for operating on the pipeline. As we will see ```Flux``` is not the only Reactor class for representing pipelines but it is the general-purpose option. 
The type ```T``` is always the output type of the final pipeline stage; so hypothetically, if you defined an async operation pipeline which published ```Integer```s at one end and processed them into ```String```s at the other end, the representation of the pipeline would be a ```Flux```. @@ -71,15 +71,15 @@ In the **Assembly phase** shown above, you describe program logic as an async op * **Stage 1**: ```ReminderAsyncService.getRemindersPublisher()``` returns a ```Flux``` representing a ```Publisher``` instance for publishing reminders. -* **Stage 2**: ```.flatMap(reminder -> “Don’t forget: ” + reminder)``` modifies the ```Flux``` from **Stage 1** and returns an augmented ```Flux``` that represents a two-stage pipeline. The pipeline consists of +* **Stage 2**: ```.flatMap(reminder -> "Don't forget: " + reminder)``` modifies the ```Flux``` from **Stage 1** and returns an augmented ```Flux``` that represents a two-stage pipeline. The pipeline consists of * the ```RemindersPublisher```, followed by - * the ```reminder -> “Don’t forget: ” + reminder``` operation which prepends "Don't forget: " to the ```reminder``` string (```reminder``` is a variable that can have any name and represents the previous stage output.) - -* **Stage 3**: ```.flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn)``` modifies the ```Flux``` from **Stage 2** and returns a further-augmented ```Flux``` that represents a three-stage pipeline. The pipeline consists of + * the ```reminder -> "Don't forget: " + reminder``` operation which prepends "Don't forget: " to the ```reminder``` string (```reminder``` is a variable that can have any name and represents the previous stage output.) + +* **Stage 3**: ```.flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn)``` modifies the ```Flux``` from **Stage 2** and returns a further-augmented ```Flux``` that represents a three-stage pipeline. 
The pipeline consists of * the ```RemindersPublisher```, * the **Stage 2** operation, and finally - * the ```strIn -> LocalDateTime.now().toString() + “: ”+ strIn``` operation, which timestamps the **Stage 2** output string. - + * the ```strIn -> LocalDateTime.now().toString() + ": "+ strIn``` operation, which timestamps the **Stage 2** output string. + Although we "ran" the Assembly phase code, all it did was build up the structure of your program, not run it. In the **Subscribe phase** you execute the pipeline that you defined in the Assembly phase. Here is how that works. You call ```java @@ -94,7 +94,7 @@ and * The ```RemindersPublisher``` instance reads the ```Subscription``` details and responds by pushing an event into the pipeline every time there is a new reminder. The ```RemindersPublisher``` will continue to push an event every time a reminder becomes available, until it has pushed as many events as were requested in the ```Subscription``` (which is infinity in this case, so the ```Publisher``` will just keep going.) -When I say that the ```RemindersPublisher``` "pushes events into the pipeline", I mean that the ```RemindersPublisher``` issues an ```onNext``` signal to the second pipeline stage (```.flatMap(reminder -> “Don’t forget: ” + reminder)```) paired with a ```String``` argument containing the reminder. ```flatMap()``` responds to an ```onNext``` signal by taking the ```String``` data passed in and applying the transformation that is in ```flatMap()```'s argument parentheses to the input data (in this case, by prepending the words “Don’t forget: ”). This signal propagates down the pipeline: pipeline Stage 2 issues an ```onNext``` signal to pipeline Stage 3 (```.flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn)```) with its output as the argument; and then pipeline Stage 3 issues its own output along with an ```onNext``` signal. 
+When I say that the ```RemindersPublisher``` "pushes events into the pipeline", I mean that the ```RemindersPublisher``` issues an ```onNext``` signal to the second pipeline stage (```.flatMap(reminder -> "Don't forget: " + reminder)```) paired with a ```String``` argument containing the reminder. ```flatMap()``` responds to an ```onNext``` signal by taking the ```String``` data passed in and applying the transformation that is in ```flatMap()```'s argument parentheses to the input data (in this case, by prepending the words "Don't forget: "). This signal propagates down the pipeline: pipeline Stage 2 issues an ```onNext``` signal to pipeline Stage 3 (```.flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn)```) with its output as the argument; and then pipeline Stage 3 issues its own output along with an ```onNext``` signal. Now what happens after pipeline Stage 3 is different – the ```onNext``` signal reached the last pipeline stage, so what happens to the final-stage ```onNext``` signal and its associated ```String``` argument? The answer is that when you called ```subscribe()```, ```subscribe()``` also created a ```Subscriber``` instance which implements a method for handling ```onNext``` signals and serves as the last stage of the pipeline. The ```Subscriber```'s ```onNext``` handler will call whatever code you wrote in the argument parentheses of ```subscribe()```, allowing you to customize for your application. In the Subscribe phase snippet above, we called @@ -106,11 +106,11 @@ which means that every time an ```onNext``` signal reaches the end of the operat In ```subscribe()``` you typically want to handle the pipeline output with some finality, i.e. by printing it to the terminal, displaying it in a GUI, running a calculation on it, etc. or doing something else before discarding the data entirely. 
That said, Reactor does allow you to call ```subscribe()``` with no arguments and just discard incoming events and data - in that case you would implement all of the logic of your program in the preceding pipeline stages, including saving the results to a global variable or printing them to the terminal. -That was a lot. So let’s step back for a moment and mention a few key points. +That was a lot. So let's step back for a moment and mention a few key points. * Keep in mind that Reactor is following a hybrid push-pull model where async events are published at a rate requested by the ```Subscriber```. * Observe that a ```Subscription``` for N events is a type of pull operation from the ```Subscriber```. The ```Publisher``` controls the rate and timing of pushing events, until it exhausts the N events requested by the ```Subscriber```, and then it stops. * This approach enables the implementation of ***backpressure***, whereby the ```Subscriber``` can size ```Subscription``` counts to adjust the rate of ```Publisher``` events if they are coming too slow or too fast to process. -* ```subscribe()``` is Reactor’s built-in ```Subscription``` generator, by default it requests all events from the ```Publisher``` ("unbounded request".) [See the Project Reactor documentation here](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) for more guidance on customizing the subscription process. +* ```subscribe()``` is Reactor's built-in ```Subscription``` generator, by default it requests all events from the ```Publisher``` ("unbounded request".) [See the Project Reactor documentation here](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) for more guidance on customizing the subscription process. 
And the most important takeaway: **Nothing happens until you subscribe.** @@ -120,9 +120,9 @@ The ```Subscriber``` and ```Publisher``` are independent entities; just because ```java Flux reminderPipeline = - Flux.just(“Wash the dishes”,“Mow the lawn”,”Sleep”) // Publisher, 3 events - .flatMap(reminder -> “Don’t forget: ” + reminder) - .flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn); // Nothing executed yet + Flux.just("Wash the dishes","Mow the lawn","Sleep") // Publisher, 3 events + .flatMap(reminder -> "Don't forget: " + reminder) + .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); // Nothing executed yet ``` ```Flux.just()``` is a [Reactor factory method](https://projectreactor.io/docs/core/release/reference/) which contrives to create a custom ```Publisher``` based on its input arguments. You could fully customize your ```Publisher``` implementation by writing a class that implements ```Publisher```; that is outside the scope of this discussion. The output of ```Flux.just()``` in the example above is a ```Publisher``` which will immediately and asynchronously push ```"Wash the dishes"```, ```"Mow the lawn"```, and ```"Sleep"``` into the pipeline as soon as it gets a ```Subscription```. Thus, upon subscription, @@ -133,7 +133,7 @@ reminderPipeline.subscribe(System.out::println); will output the three Strings shown and then end. -Suppose now we want to add two special behaviors to our program: (1) After all M Strings have been printed, print “End of reminders.” so the user knows we are finished. (2) Print the stack trace for any ```Exception```s which occur during execution. A modification to the ```subscribe()``` call handles all of this: +Suppose now we want to add two special behaviors to our program: (1) After all M Strings have been printed, print "End of reminders." so the user knows we are finished. (2) Print the stack trace for any ```Exception```s which occur during execution. 
A modification to the ```subscribe()``` call handles all of this: ```java reminderPipeline.subscribe(strIn -> { @@ -143,11 +143,11 @@ err -> { err.printStackTrace(); }, () -> { - System.out.println(“End of reminders.”); + System.out.println("End of reminders."); }); ``` -Let’s break this down. Remember we said that the argument to ```subscribe()``` determines how the ```Subscriber``` handles ```onNext```? I will mention two additional signals which Reactor uses to propagate status information along the pipeline: ```onComplete```, and ```onError```. Both signals denote completion of the Stream; only ```onComplete``` represents successful completion. The ```onError``` signal is associated with an ```Exception``` instance related to an error; the ```onComplete``` signal has no associated data. +Let's break this down. Remember we said that the argument to ```subscribe()``` determines how the ```Subscriber``` handles ```onNext```? I will mention two additional signals which Reactor uses to propagate status information along the pipeline: ```onComplete```, and ```onError```. Both signals denote completion of the Stream; only ```onComplete``` represents successful completion. The ```onError``` signal is associated with an ```Exception``` instance related to an error; the ```onComplete``` signal has no associated data. As it turns out, we can supply additional code to ```subscribe()``` in the form of Java 8 lambdas and handle ```onComplete``` and ```onError``` as well as ```onNext```! 
Picking apart the code snippet above, @@ -160,8 +160,8 @@ For the special cases of M=0 and M=1 for the ```Publisher```, Reactor provides a ```java Mono reminderPipeline = Mono.just("Are you sure you want to cancel your Reminders service?") // Publisher, 1 event - .flatMap(reminder -> “Act now: ” + reminder) - .flatMap(strIn -> LocalDateTime.now().toString() + “: ”+ strIn); + .flatMap(reminder -> "Act now: " + reminder) + .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); ``` Again, ```Mono.just()``` is a Reactor factory method which creates the single-event publisher. This ```Publisher``` will push its argument into the Reactive Stream pipeline with an ```onNext``` signal and then optionally issue an ```onComplete``` signal indicating completion. diff --git a/reactor-rxjava-guide.md b/reactor-rxjava-guide.md new file mode 100644 index 0000000..972ac86 --- /dev/null +++ b/reactor-rxjava-guide.md @@ -0,0 +1,119 @@ +# Reactor vs RxJava guide + +The purpose of this guide is to help those who are more familiar with the RxJava framework to familiarize themselves with the Reactor framework and Azure Cosmos DB Java SDK 4.0 for Core (SQL) API ("Java SDK 4.0" from here on out.) + +Users of Async Java SDK 2.x.x should read this guide to understand how familiar async tasks can be performed in Reactor. We recommend first reading the [Reactor pattern guide](reactor-pattern-guide.md) for more general Reactor introduction. 
+ +A quick refresher on Java SDK versions: + +| Java SDK | Release Date | Bundled APIs | Maven Jar | Java package name |API Reference | Release Notes | +|-------------------------|--------------|----------------------|-----------------------------------------|----------------------------------|-----------------------------------------------------------|------------------------------------------------------------------------------------------| +| Async 2.x.x | June 2018 | Async(RxJava) | com.microsoft.azure::azure-cosmosdb | com.microsoft.azure.cosmosdb.rx | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-async-java) | +| "Legacy" Sync 2.x.x | Sept 2018 | Sync | com.microsoft.azure::azure-documentdb | com.microsoft.azure.cosmosdb | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-java) | +| 3.x.x | July 2019 | Async(Reactor)/Sync | com.microsoft.azure::azure-cosmos | com.azure.data.cosmos | [API](https://azure.github.io/azure-cosmosdb-java/3.0.0/) | - | +| 4.0 | April 2020 | Async(Reactor)/Sync | com.azure::azure-cosmos | com.azure.cosmos | - | - | + +## Background + +[Reactive Streams](http://www.reactive-streams.org/) is an industry standard for declarative dataflow programming in an asynchronous environment. More detail on design principles can be found in the [Reactive Manifesto](https://www.reactivemanifesto.org/). It is the basis for Azure's async Java SDKs going forward. + +A Reactive Streams framework implements the Reactive Streams Standard for specific programming languages. + +The [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](http://reactivex.io/) for JVM) framework was the basis of past Azure Java SDKs, but will not be going forward. 
Async Java SDK 2.x.x was implemented using RxJava 1; in this guide we will assume that RxJava 1 is the version you are already familiar with i.e. as a result of working with the Async Java SDK 2.x.x. + +[Project Reactor](https://projectreactor.io/) or just *Reactor* is the Reactive Programming framework being used for new Azure Java SDKs. The purpose of the rest of this document is to help you get started with Reactor. + +## Comparison between Reactor and RxJava + +RxJava 1 provides a framework for implementing the **Observer Pattern** in your application. In the Observer Pattern, +* ```Observable```s are entities that receive events and data (i.e. UI, keyboard, TCP, ...) from outside sources, and make those events and data available to your program. +* ```Observer```s are the entities which subscribe to the Observable events and data. + +The [Reactor pattern guide](reactor-pattern-guide.md) gives a brief conceptual overview of Reactor. In summary: +* ```Publisher```s are the entities which make events and data from outside sources available to the program +* ```Subscriber```s subscribe to the events and data from the ```Publisher``` + +Both frameworks facilitate asynchronous, event-driven programming. Both frameworks allow you to chain together a pipeline of operations between Observable/Observer or Publisher/Subscriber. + +Roughly, what you would use an ```Observable``` for in RxJava, you would use a ```Flux``` for in Reactor. And what you would use a ```Single``` for in RxJava, you would use a ```Mono``` for in Reactor. + +The critical difference between the two frameworks is really in the core implementation: +Reactor operates a service which receives event/data pairs serially from a ```Publisher```, demultiplexes them, and forwards them to registered ```Subscribers```. This model was design help servers efficiently dispatch requests in a distributed system. +The RxJava approach is more general-purpose. 
```Observer```s subscribe directly to the ```Observable``` and the ```Observable``` sends events and data directly to ```Observer```s, with no central service handling dispatch. + +### Summary: rules of thumb to convert RxJava code into Reactor code + +* An RxJava ```Observable``` will become a Reactor ```Flux``` + +* An RxJava ```Single``` will become a Reactor ```Mono``` + +* An RxJava ```Subscriber``` is still a ```Subscriber``` in Reactor + +* Operators such as ```map()```, ```filter()```, and ```flatMap()``` are the same + +## Examples of tasks in Reactor and RxJava + +* Reminder app example from the [Reactor pattern guide](reactor-pattern-guide.md) + +**Reactor:** +```java +ReminderAsyncService.getRemindersPublisher() // Pipeline Stage 1 + .flatMap(reminder -> "Don't forget: " + reminder) // Stage 2 + .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); // Stage 3 + .subscribe(System.out::println); +``` + +**RxJava:** +```java +ReminderAsyncService.getRemindersObservable() // Pipeline Stage 1 + .flatMap(reminder -> "Don't forget: " + reminder) // Stage 2 + .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); // Stage 3 + .subscribe(item -> System.out.println(item)); +``` + +* Three-event ```Publisher``` example from the [Reactor pattern guide](reactor-pattern-guide.md) + +**Reactor:** +```java +Flux.just("Wash the dishes","Mow the lawn","Sleep") // Publisher, 3 events + .flatMap(reminder -> "Don't forget: " + reminder) + .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); // Nothing executed yet + .subscribe(strIn -> { + System.out.println(strIn); + }, + err -> { + err.printStackTrace(); + }, + () -> { + System.out.println("End of reminders."); +}); +``` + +**RxJava:** +```java +Observable.just("Wash the dishes","Mow the lawn","Sleep") // Observable, 3 events + .flatMap(reminder -> "Don't forget: " + reminder) + .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); // Nothing executed yet + .subscribe(strIn -> 
System.out.println(strIn), + err -> err.printStackTrace(), + () -> System.out.println("End of reminders.") +); +``` + +* Mono example from the [Reactor pattern guide](reactor-pattern-guide.md) + +**Reactor:** +```java +Mono.just("Are you sure you want to cancel your Reminders service?") // Publisher, 1 event + .flatMap(reminder -> "Act now: " + reminder) + .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); + .subscribe(System.out::println); +``` + +**RxJava:** +```java +Single.just("Are you sure you want to cancel your Reminders service?") // Publisher, 1 event + .flatMap(reminder -> "Act now: " + reminder) + .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); + .subscribe(item -> System.out.println(item)); +``` From a3bd60545bff60a0f18a4fe3aa5eb65efd20e5ad Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 30 Mar 2020 03:57:30 -0700 Subject: [PATCH 096/110] small fix --- reactor-rxjava-guide.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/reactor-rxjava-guide.md b/reactor-rxjava-guide.md index 972ac86..ddb1d83 100644 --- a/reactor-rxjava-guide.md +++ b/reactor-rxjava-guide.md @@ -71,15 +71,15 @@ ReminderAsyncService.getRemindersObservable() // Pipeline Stage 1 .subscribe(item -> System.out.println(item)); ``` -* Three-event ```Publisher``` example from the [Reactor pattern guide](reactor-pattern-guide.md) +* Three-event ```Publisher``` example f"om 'he [Reacto" pattern guide](reactor-pattern-guide.md) **Reactor:** ```java -Flux.just("Wash the dishes","Mow the lawn","Sleep") // Publisher, 3 events +Flux.just("Wash the dishes","Mow the lawn","Sleep") // Publisher, 3 e"en"s .flatMap(reminder -> "Don't forget: " + reminder) .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); // Nothing executed yet - .subscribe(strIn -> { - System.out.println(strIn); + .subscribe(strIn -> {"" + System.out.println(strIn); }, err -> { err.printStackTrace(); @@ -110,10 +110,10 @@ Mono.just("Are you sure you 
want to cancel your Reminders service?") // Publishe .subscribe(System.out::println); ``` -**RxJava:** +**RxJava:**' ```java Single.just("Are you sure you want to cancel your Reminders service?") // Publisher, 1 event .flatMap(reminder -> "Act now: " + reminder) .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); .subscribe(item -> System.out.println(item)); -``` +``` \ No newline at end of file From 570af8a40764aa44bc060d1a61c5533a323ce6ff Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 30 Mar 2020 14:20:01 -0700 Subject: [PATCH 097/110] Typo --- reactor-rxjava-guide.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/reactor-rxjava-guide.md b/reactor-rxjava-guide.md index ddb1d83..7f03086 100644 --- a/reactor-rxjava-guide.md +++ b/reactor-rxjava-guide.md @@ -38,7 +38,7 @@ Both frameworks facilitate asynchronous, event-driven programming. Both framewor Roughly, what you would use an ```Observable``` for in RxJava, you would use a ```Flux``` for in Reactor. And what you would use a ```Single``` for in RxJava, you would use a ```Mono``` for in Reactor. The critical difference between the two frameworks is really in the core implementation: -Reactor operates a service which receives event/data pairs serially from a ```Publisher```, demultiplexes them, and forwards them to registered ```Subscribers```. This model was design help servers efficiently dispatch requests in a distributed system. +Reactor operates a service which receives event/data pairs serially from a ```Publisher```, demultiplexes them, and forwards them to registered ```Subscribers```. This model was designed to help servers efficiently dispatch requests in a distributed system. The RxJava approach is more general-purpose. ```Observer```s subscribe directly to the ```Observable``` and the ```Observable``` sends events and data directly to ```Observer```s, with no central service handling dispatch. 
### Summary: rules of thumb to convert RxJava code into Reactor code @@ -116,4 +116,4 @@ Single.just("Are you sure you want to cancel your Reminders service?") // Publis .flatMap(reminder -> "Act now: " + reminder) .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); .subscribe(item -> System.out.println(item)); -``` \ No newline at end of file +``` From 4317826c049089bd10cee3aa906f39290abe7a34 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 30 Mar 2020 14:24:35 -0700 Subject: [PATCH 098/110] Removed quotes --- migration-guide.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/migration-guide.md b/migration-guide.md index 39e0143..7f9fd0e 100644 --- a/migration-guide.md +++ b/migration-guide.md @@ -54,7 +54,7 @@ In version 2.x.x Java SDKs, all operations on resources and documents are perfor In Java SDK 4.0, custom POJO's and ```JsonNodes``` are the two options for writing and reading documents from Azure Cosmos DB. -In Java SDK 3.x.x ```CosmosItemProperties"`` 'as exposed"by the public API and served as a document representation. This class is no longer exposed in Java SDK 4.0. +In Java SDK 3.x.x ```CosmosItemProperties``` was exposed by the public API and served as a document representation. This class is no longer exposed in Java SDK 4.0. """" ### Imports @@ -75,10 +75,10 @@ In Java SDK 3.x.x ```CosmosItemProperties"`` 'as exposed"by the public API and s ### Accessors -Java SDK 4.0 exposes ```get``` and ```set``` methods for accessing in"ta"ce members. +Java SDK 4.0 exposes ```get``` and ```set``` methods for accessing instance members. * Example: a ```CosmosContainer``` instance has ```container.getId()``` and ```container.setId()``` methods. -This is different from Java SDK 3.x.x which exposes a f"ue"t interface. +This is different from Java SDK 3.x.x which exposes a fluent interface. Example: a ```CosmosSyncContainer``` instance has ```container.id()``` which is overloaded to get or set ```id```. 
## Code snippet comparisons @@ -94,7 +94,7 @@ defaultPolicy.setPreferredLocations(Lists.newArrayList("Your Account Location")) // Use Direct Mode for best performance defaultPolicy.setConnectionMode(ConnectionMode.DIRECT); -// Create Async client.""""""''""""""'' +// Create Async client. // Building an async client is still a sync operation. client = new CosmosClientBuilder() .setEndpoint("your.hostname") @@ -120,8 +120,8 @@ client.createDatabaseIfNotExists("YourDatabaseName") ``` **Java SDK 3.x.x Async API:** -"""""""""""" -```java""'""' + +```java ConnectionPolicy defaultPolicy = ConnectionPolicy.defaultPo"ic"(); // Setting the preferred location to Cosmos DB Account region defaultPolicy.preferredLocations(Lists.newArrayList("Your Account Location")); @@ -133,7 +133,7 @@ client = new CosmosClientBuilder() .key("yourmasterkey") .connectionPolicy(defaultPolicy) .consistencyLevel(ConsistencyLevel.EVENTUAL) - .build();"""" + .build(); // Create database with specified name client.createDatabaseIfNotExists("YourDatabaseName") @@ -147,7 +147,7 @@ client.createDatabaseIfNotExists("YourDatabaseName") }).flatMap(containerResponse -> { container = containerResponse.container(); return Mono.empty(); -}).'ubscribe(); +}).subscribe(); ``` ### Item operations @@ -160,8 +160,8 @@ int number_of_docs = 50000; ArrayList docs = generateManyDocs(number_of_docs); // Insert many docs into container... -Flux.fromIterable(docs)"""" - .flatMap(doc -> container.createItem(doc))"""" +Flux.fromIterable(docs) + .flatMap(doc -> container.createItem(doc)) .subscribe(); // ...Subscribing triggers stream execution. 
``` From 063359e9d48d1801042a1b1db8004237a970ac5d Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 30 Mar 2020 14:25:31 -0700 Subject: [PATCH 099/110] Quotes --- migration-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/migration-guide.md b/migration-guide.md index 7f9fd0e..8edc5ed 100644 --- a/migration-guide.md +++ b/migration-guide.md @@ -55,7 +55,7 @@ In version 2.x.x Java SDKs, all operations on resources and documents are perfor In Java SDK 4.0, custom POJO's and ```JsonNodes``` are the two options for writing and reading documents from Azure Cosmos DB. In Java SDK 3.x.x ```CosmosItemProperties``` was exposed by the public API and served as a document representation. This class is no longer exposed in Java SDK 4.0. -"""" + ### Imports * Java SDK 4.0 packages begin with ```com.azure.cosmos``` From 37f83652e472f4d0195cce26c64d1d88ba6318e3 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Thu, 9 Apr 2020 12:20:10 -0700 Subject: [PATCH 100/110] Reproduced M issue --- .../async/SampleCRUDQuickstartAsync.java | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java index eead774..e9fcfc3 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java @@ -20,6 +20,7 @@ import com.azure.cosmos.models.CosmosAsyncDatabaseResponse; import com.azure.cosmos.models.CosmosAsyncItemResponse; import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; import com.azure.cosmos.models.FeedOptions; import com.azure.cosmos.models.PartitionKey; import com.google.common.collect.Lists; @@ -165,6 +166,22 @@ private void createContainerIfNotExists() throws 
Exception { }).block(); // + + //Modify existing container + + + Mono propertiesReplace = container.replace(containerProperties, new CosmosContainerRequestOptions()); + propertiesReplace.flatMap(containerResponse -> { + logger.info("setupContainer(): Container " + container.getId() + " in " + database.getId() + + "has been updated with it's new properties."); + return Mono.empty(); + }).onErrorResume((exception) -> { + logger.error("setupContainer(): Unable to update properties for container " + containerProperties.getId() + + " in database " + database.getId() + + ". e: " + exception.getLocalizedMessage()); + return Mono.empty(); + }).block(); + } private void createFamilies(Flux families) throws Exception { From fff179d24e9abb594fca22f25fa8017d9c73cb68 Mon Sep 17 00:00:00 2001 From: Kushagra Thapar Date: Thu, 9 Apr 2020 18:13:45 -0700 Subject: [PATCH 101/110] Updated code with cosmos container replace operation --- .../async/SampleCRUDQuickstartAsync.java | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java index e9fcfc3..81298b5 100644 --- a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java @@ -159,24 +159,19 @@ private void createContainerIfNotExists() throws Exception { Mono containerIfNotExists = database.createContainerIfNotExists(containerProperties, 400); // Create container with 400 RU/s - containerIfNotExists.flatMap(containerResponse -> { - container = containerResponse.getContainer(); - logger.info("Checking container " + container.getId() + " completed!\n"); - return Mono.empty(); - }).block(); - + CosmosAsyncContainerResponse cosmosContainerResponse = containerIfNotExists.block(); + container = 
cosmosContainerResponse.getContainer(); // //Modify existing container - - + containerProperties = cosmosContainerResponse.getProperties(); Mono propertiesReplace = container.replace(containerProperties, new CosmosContainerRequestOptions()); propertiesReplace.flatMap(containerResponse -> { logger.info("setupContainer(): Container " + container.getId() + " in " + database.getId() + "has been updated with it's new properties."); return Mono.empty(); }).onErrorResume((exception) -> { - logger.error("setupContainer(): Unable to update properties for container " + containerProperties.getId() + + logger.error("setupContainer(): Unable to update properties for container " + container.getId() + " in database " + database.getId() + ". e: " + exception.getLocalizedMessage()); return Mono.empty(); From c49d38966bc1713d7fae9f0796beec539a6b209d Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Tue, 28 Apr 2020 09:55:41 -0700 Subject: [PATCH 102/110] Typos --- reactor-rxjava-guide.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/reactor-rxjava-guide.md b/reactor-rxjava-guide.md index 7f03086..01cdcc4 100644 --- a/reactor-rxjava-guide.md +++ b/reactor-rxjava-guide.md @@ -71,11 +71,11 @@ ReminderAsyncService.getRemindersObservable() // Pipeline Stage 1 .subscribe(item -> System.out.println(item)); ``` -* Three-event ```Publisher``` example f"om 'he [Reacto" pattern guide](reactor-pattern-guide.md) +* Three-event ```Publisher``` example from the [Reactor pattern guide](reactor-pattern-guide.md) **Reactor:** ```java -Flux.just("Wash the dishes","Mow the lawn","Sleep") // Publisher, 3 e"en"s +Flux.just("Wash the dishes","Mow the lawn","Sleep") // Publisher, 3 events .flatMap(reminder -> "Don't forget: " + reminder) .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); // Nothing executed yet .subscribe(strIn -> {"" From abcdb15c2b1ea1cb7db9125bf361227d9e179a3e Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 11 May 2020 04:04:55 -0700 
Subject: [PATCH 103/110] Created DatabaseCRUDQuickstart.java --- .../sync/CollectionCRUDQuickstartSync.java | 4 + .../sync/DatabaseCRUDQuickstart.java | 137 +++++++ .../sync/DocumentCRUDQuickstartSync.java | 4 + .../queries/sync/QueriesQuickstartSync.java | 4 + .../sync/UserManagementQuickstartSync.java | 4 + .../workedappexample/SampleGroceryStore.java | 338 ------------------ 6 files changed, 153 insertions(+), 338 deletions(-) create mode 100644 src/main/java/com/azure/cosmos/examples/collectioncrud/sync/CollectionCRUDQuickstartSync.java create mode 100644 src/main/java/com/azure/cosmos/examples/databasecrud/sync/DatabaseCRUDQuickstart.java create mode 100644 src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstartSync.java create mode 100644 src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstartSync.java create mode 100644 src/main/java/com/azure/cosmos/examples/usermanagement/sync/UserManagementQuickstartSync.java delete mode 100644 src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java diff --git a/src/main/java/com/azure/cosmos/examples/collectioncrud/sync/CollectionCRUDQuickstartSync.java b/src/main/java/com/azure/cosmos/examples/collectioncrud/sync/CollectionCRUDQuickstartSync.java new file mode 100644 index 0000000..cef46c4 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/collectioncrud/sync/CollectionCRUDQuickstartSync.java @@ -0,0 +1,4 @@ +package com.azure.cosmos.examples.collectioncrud.sync; + +public class CollectionCRUDQuickstartSync { +} diff --git a/src/main/java/com/azure/cosmos/examples/databasecrud/sync/DatabaseCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/databasecrud/sync/DatabaseCRUDQuickstart.java new file mode 100644 index 0000000..1a34cac --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/databasecrud/sync/DatabaseCRUDQuickstart.java @@ -0,0 +1,137 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +package com.azure.cosmos.examples.databasecrud.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.models.CosmosDatabaseProperties; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import com.azure.cosmos.models.FeedOptions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DatabaseCRUDQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + + private CosmosDatabase database; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to demonstrate the following database CRUD operations: + * -Create + * -Read by ID + * -Read all + * -Delete + */ + public static void main(String[] args) { + DatabaseCRUDQuickstart p = new DatabaseCRUDQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.databaseCRUDDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void databaseCRUDDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + createDatabaseIfNotExists(); + 
readDatabaseById(); + readAllDatabases(); + // deleteADatabase() is called at shutdown() + + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Create database if not exists + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + logger.info("Done."); + } + + // Database read + private void readDatabaseById() throws Exception { + logger.info("Read database " + databaseName + " by ID."); + + // Read database by ID + database = client.getDatabase(databaseName); + + logger.info("Done."); + } + + // Database read all + private void readAllDatabases() throws Exception { + logger.info("Read all databases in the account."); + + // Read all databases in the account + CosmosPagedIterable databases = client.readAllDatabases(new FeedOptions()); + + // Print + String msg="Listing databases in account:\n"; + for(CosmosDatabaseProperties dbProps : databases) { + msg += String.format("-Database ID: %s\n",dbProps.getId()); + } + logger.info(msg + "\n"); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: {}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstartSync.java b/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstartSync.java new file mode 100644 index 0000000..59de052 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstartSync.java @@ -0,0 +1,4 @@ +package com.azure.cosmos.examples.documentcrud.sync; + +public class DocumentCRUDQuickstartSync { +} diff --git a/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstartSync.java b/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstartSync.java new file mode 100644 index 0000000..fff294d --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstartSync.java @@ -0,0 +1,4 @@ +package com.azure.cosmos.examples.queries.sync; + +public class QueriesQuickstartSync { +} diff --git a/src/main/java/com/azure/cosmos/examples/usermanagement/sync/UserManagementQuickstartSync.java b/src/main/java/com/azure/cosmos/examples/usermanagement/sync/UserManagementQuickstartSync.java new file mode 100644 index 0000000..a9bb4fe --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/usermanagement/sync/UserManagementQuickstartSync.java @@ -0,0 +1,4 @@ +package com.azure.cosmos.examples.usermanagement.sync; + +public class UserManagementQuickstartSync { +} diff --git a/src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java b/src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java deleted file mode 100644 index 701c0fb..0000000 --- a/src/main/java/com/azure/cosmos/examples/workedappexample/SampleGroceryStore.java +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
-package com.azure.cosmos.examples.workedappexample; - -import com.azure.cosmos.ChangeFeedProcessor; -import com.azure.cosmos.ConnectionPolicy; -import com.azure.cosmos.ConsistencyLevel; -import com.azure.cosmos.CosmosAsyncContainer; -import com.azure.cosmos.CosmosAsyncDatabase; -import com.azure.cosmos.CosmosClientBuilder; -import com.azure.cosmos.CosmosClientException; -import com.azure.cosmos.CosmosAsyncClient; -import com.azure.cosmos.CosmosPagedFlux; -import com.azure.cosmos.examples.common.AccountSettings; -import com.azure.cosmos.examples.common.Family; -import com.azure.cosmos.implementation.Utils; -import com.azure.cosmos.models.CosmosAsyncContainerResponse; -import com.azure.cosmos.models.CosmosContainerProperties; -import com.azure.cosmos.models.CosmosContainerRequestOptions; -import com.azure.cosmos.models.FeedOptions; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.ObjectNode; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import reactor.core.scheduler.Schedulers; - -import java.text.DateFormat; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.time.Duration; -import java.util.Date; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -/** - * End-to-end application example code using Change Feed. - * - * This sample application inserts grocery store inventory data into an Azure Cosmos DB container; - * meanwhile, Change Feed runs in the background building a materialized view - * based on each document update. - * - * The materialized view facilitates efficient queries over item type. 
- * - */ -public class SampleGroceryStore { - - public static int WAIT_FOR_WORK = 60000; - public static final String DATABASE_NAME = "db_" + RandomStringUtils.randomAlphabetic(7); - public static final String COLLECTION_NAME = "coll_" + RandomStringUtils.randomAlphabetic(7); - private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper(); - protected static Logger logger = LoggerFactory.getLogger(SampleGroceryStore.class.getSimpleName()); - - - private static ChangeFeedProcessor changeFeedProcessorInstance; - private static boolean isWorkCompleted = false; - - private static CosmosAsyncContainer typeContainer; - - public static void main (String[]args) { - logger.info("BEGIN Sample"); - - try { - - System.out.println("Press enter to create the grocery store inventory system..."); - - System.out.println("-->CREATE DocumentClient"); - CosmosAsyncClient client = getCosmosClient(); - - System.out.println("-->CREATE Contoso Grocery Store database: " + DATABASE_NAME); - CosmosAsyncDatabase cosmosDatabase = createNewDatabase(client, DATABASE_NAME); - - System.out.println("-->CREATE container for store inventory: " + COLLECTION_NAME); - CosmosAsyncContainer feedContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME, "/id"); - - System.out.println("-->CREATE container for lease: " + COLLECTION_NAME + "-leases"); - CosmosAsyncContainer leaseContainer = createNewLeaseCollection(client, DATABASE_NAME, COLLECTION_NAME + "-leases"); - - System.out.println("-->CREATE container for materialized view partitioned by 'type': " + COLLECTION_NAME + "-leases"); - typeContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME + "-pktype", "/type"); - - System.out.println("Press enter to add items to the grocery store inventory system..."); - - changeFeedProcessorInstance = getChangeFeedProcessor("SampleHost_1", feedContainer, leaseContainer); - changeFeedProcessorInstance.start() - .subscribeOn(Schedulers.elastic()) - .doOnSuccess(aVoid -> { 
- //Insert 10 documents into the feed container - //createNewDocumentsJSON demonstrates how to insert a JSON object into a Cosmos DB container as an item - createNewDocumentsJSON(feedContainer, 10, Duration.ofSeconds(3)); - isWorkCompleted = true; - }) - .subscribe(); - - long remainingWork = WAIT_FOR_WORK; - while (!isWorkCompleted && remainingWork > 0) { - Thread.sleep(100); - remainingWork -= 100; - } - - if (isWorkCompleted) { - if (changeFeedProcessorInstance != null) { - changeFeedProcessorInstance.stop().subscribe(); - } - } else { - throw new RuntimeException("The change feed processor initialization and automatic create document feeding process did not complete in the expected time"); - } - - System.out.println("Press enter to query the materialized view..."); - - queryItems("SELECT * FROM c WHERE c.type IN ('milk','pens')", typeContainer); - - System.out.println("Press enter to clean up & exit the sample code..."); - - System.out.println("-->DELETE sample's database: " + DATABASE_NAME); - deleteDatabase(cosmosDatabase); - - Thread.sleep(500); - - } catch (Exception e) { - e.printStackTrace(); - } - - System.out.println("END Sample"); - } - - public static ChangeFeedProcessor getChangeFeedProcessor(String hostName, CosmosAsyncContainer feedContainer, CosmosAsyncContainer leaseContainer) { - return ChangeFeedProcessor.changeFeedProcessorBuilder() - .setHostName(hostName) - .setFeedContainer(feedContainer) - .setLeaseContainer(leaseContainer) - .setHandleChanges((List docs) -> { - for (JsonNode document : docs) { - //Duplicate each document update from the feed container into the materialized view container - updateInventoryTypeMaterializedView(document); - } - - }) - .build(); - } - - private static void updateInventoryTypeMaterializedView(JsonNode document) { - typeContainer.createItem(document).subscribe(); - } - - public static CosmosAsyncClient getCosmosClient() { - - return new CosmosClientBuilder() - .setEndpoint(AccountSettings.HOST) - 
.setKey(AccountSettings.MASTER_KEY) - .setConnectionPolicy(ConnectionPolicy.getDefaultPolicy()) - .setConsistencyLevel(ConsistencyLevel.EVENTUAL) - .buildAsyncClient(); - } - - public static CosmosAsyncDatabase createNewDatabase(CosmosAsyncClient client, String databaseName) { - return client.createDatabaseIfNotExists(databaseName).block().getDatabase(); - } - - public static void deleteDatabase(CosmosAsyncDatabase cosmosDatabase) { - cosmosDatabase.delete().block(); - } - - public static CosmosAsyncContainer createNewCollection(CosmosAsyncClient client, String databaseName, String collectionName, String partitionKey) { - CosmosAsyncDatabase databaseLink = client.getDatabase(databaseName); - CosmosAsyncContainer collectionLink = databaseLink.getContainer(collectionName); - CosmosAsyncContainerResponse containerResponse = null; - - try { - containerResponse = collectionLink.read().block(); - - if (containerResponse != null) { - throw new IllegalArgumentException(String.format("Collection %s already exists in database %s.", collectionName, databaseName)); - } - } catch (RuntimeException ex) { - if (ex instanceof CosmosClientException) { - CosmosClientException cosmosClientException = (CosmosClientException) ex; - - if (cosmosClientException.getStatusCode() != 404) { - throw ex; - } - } else { - throw ex; - } - } - - CosmosContainerProperties containerSettings = new CosmosContainerProperties(collectionName, partitionKey); - CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); - containerResponse = databaseLink.createContainer(containerSettings, 10000, requestOptions).block(); - - if (containerResponse == null) { - throw new RuntimeException(String.format("Failed to create collection %s in database %s.", collectionName, databaseName)); - } - - return containerResponse.getContainer(); - } - - public static CosmosAsyncContainer createNewLeaseCollection(CosmosAsyncClient client, String databaseName, String leaseCollectionName) { - 
CosmosAsyncDatabase databaseLink = client.getDatabase(databaseName); - CosmosAsyncContainer leaseCollectionLink = databaseLink.getContainer(leaseCollectionName); - CosmosAsyncContainerResponse leaseContainerResponse = null; - - try { - leaseContainerResponse = leaseCollectionLink.read().block(); - - if (leaseContainerResponse != null) { - leaseCollectionLink.delete().block(); - - try { - Thread.sleep(1000); - } catch (InterruptedException ex) { - ex.printStackTrace(); - } - } - } catch (RuntimeException ex) { - if (ex instanceof CosmosClientException) { - CosmosClientException cosmosClientException = (CosmosClientException) ex; - - if (cosmosClientException.getStatusCode() != 404) { - throw ex; - } - } else { - throw ex; - } - } - - CosmosContainerProperties containerSettings = new CosmosContainerProperties(leaseCollectionName, "/id"); - CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); - - leaseContainerResponse = databaseLink.createContainer(containerSettings, 400,requestOptions).block(); - - if (leaseContainerResponse == null) { - throw new RuntimeException(String.format("Failed to create collection %s in database %s.", leaseCollectionName, databaseName)); - } - - return leaseContainerResponse.getContainer(); - } - - public static void createNewDocumentsJSON(CosmosAsyncContainer containerClient, int count, Duration delay) { - System.out.println("Creating documents\n"); - String suffix = RandomStringUtils.randomAlphabetic(10); - for (int i = 0; i <= count; i++) { - - String jsonString = "{\"id\" : \"" + String.format("0%d-%s", i, suffix) + "\"" - + "," - + "\"brand\" : \"" + ((char)(65+i)) + "\"" - + "," - + "\"type\" : \"" + ((char)(69+i)) + "\"" - + "," - + "\"expiryDate\" : \"" + "2020-03-" + StringUtils.leftPad(String.valueOf(5+i), 2, "0") + "\"" - + "}"; - - ObjectMapper mapper = new ObjectMapper(); - JsonNode document = null; - - try { - document = mapper.readTree(jsonString); - } catch (Exception e) { - 
e.printStackTrace(); - } - - containerClient.createItem(document).subscribe(doc -> { - System.out.println(".\n"); - }); - - long remainingWork = delay.toMillis(); - try { - while (remainingWork > 0) { - Thread.sleep(100); - remainingWork -= 100; - } - } catch (InterruptedException iex) { - // exception caught - break; - } - } - } - - public static void queryItems(String query, CosmosAsyncContainer container) { - - FeedOptions queryOptions = new FeedOptions(); - queryOptions.setMaxItemCount(10); - // Set populate query metrics to get metrics around query executions - queryOptions.setPopulateQueryMetrics(true); - - CosmosPagedFlux pagedFluxResponse = container.queryItems( - query, queryOptions, JsonNode.class); - - final CountDownLatch completionLatch = new CountDownLatch(1); - - pagedFluxResponse.byPage().subscribe( - fluxResponse -> { - logger.info("Got a page of query result with " + - fluxResponse.getResults().size() + " items(s)" - + " and request charge of " + fluxResponse.getRequestCharge()); - - /* - fluxResponse.getResults() - - logger.info("Item Ids " + fluxResponse - .getResults() - .stream() - .map(JsonNode::get("id")) - .collect(Collectors.toList())); - - */ - }, - err -> { - if (err instanceof CosmosClientException) { - //Client-specific errors - CosmosClientException cerr = (CosmosClientException) err; - cerr.printStackTrace(); - logger.error(String.format("Read Item failed with %s\n", cerr)); - } else { - //General errors - err.printStackTrace(); - } - - completionLatch.countDown(); - }, - () -> { - completionLatch.countDown(); - } - ); - - try { - completionLatch.await(); - } catch (InterruptedException err) { - throw new AssertionError("Unexpected Interruption", err); - } - } - -} From d771500a5c40ad7fa52e45b8c518f1a85055c2d8 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 11 May 2020 04:25:36 -0700 Subject: [PATCH 104/110] Added container CRUD sample --- .../sync/CollectionCRUDQuickstartSync.java | 4 - .../sync/ContainerCRUDQuickstart.java 
| 180 ++++++++++++++++++ 2 files changed, 180 insertions(+), 4 deletions(-) delete mode 100644 src/main/java/com/azure/cosmos/examples/collectioncrud/sync/CollectionCRUDQuickstartSync.java create mode 100644 src/main/java/com/azure/cosmos/examples/containercrud/sync/ContainerCRUDQuickstart.java diff --git a/src/main/java/com/azure/cosmos/examples/collectioncrud/sync/CollectionCRUDQuickstartSync.java b/src/main/java/com/azure/cosmos/examples/collectioncrud/sync/CollectionCRUDQuickstartSync.java deleted file mode 100644 index cef46c4..0000000 --- a/src/main/java/com/azure/cosmos/examples/collectioncrud/sync/CollectionCRUDQuickstartSync.java +++ /dev/null @@ -1,4 +0,0 @@ -package com.azure.cosmos.examples.collectioncrud.sync; - -public class CollectionCRUDQuickstartSync { -} diff --git a/src/main/java/com/azure/cosmos/examples/containercrud/sync/ContainerCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/containercrud/sync/ContainerCRUDQuickstart.java new file mode 100644 index 0000000..0982956 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/containercrud/sync/ContainerCRUDQuickstart.java @@ -0,0 +1,180 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.examples.containercrud.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; +import com.azure.cosmos.models.CosmosContainerResponse; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import com.azure.cosmos.models.FeedOptions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ContainerCRUDQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + + private CosmosDatabase database; + private CosmosContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to demonstrate the following container CRUD operations: + * -Create + * -Update throughput + * -Read by ID + * -Read all + * -Delete + */ + public static void main(String[] args) { + ContainerCRUDQuickstart p = new ContainerCRUDQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.containerCRUDDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void containerCRUDDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + 
AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + createDatabaseIfNotExists(); + createContainerIfNotExists(); + + readContainerById(); + readAllContainers(); + // deleteAContainer() is called at shutdown() + + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Create database if not exists + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + logger.info("Done."); + } + + private void createContainerIfNotExists() throws Exception { + logger.info("Create container " + containerName + " if not exists."); + + // Create container if not exists + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/lastName"); + + // Create container with 400 RU/s + container = database.createContainerIfNotExists(containerProperties, 200).getContainer(); + + logger.info("Done."); + } + + // Update container throughput + private void updateContainerThroughput() throws Exception { + logger.info("Update throughput for container " + containerName + "."); + + // Specify new throughput value + container.replaceProvisionedThroughput(400); + + logger.info("Done."); + } + + // Container read + private void readContainerById() throws Exception { + logger.info("Read container " + containerName + " by ID."); + + // Read container by ID + container = database.getContainer(containerName); + + logger.info("Done."); + } + + // Container read all + private void readAllContainers() throws Exception { + logger.info("Read all containers in database " + databaseName + "."); + + // Read all containers in the account + CosmosPagedIterable containers = database.readAllContainers(new FeedOptions()); + + // Print + String msg="Listing containers in 
database:\n"; + for(CosmosContainerProperties containerProps : containers) { + msg += String.format("-Container ID: %s\n",containerProps.getId()); + } + logger.info(msg + "\n"); + + logger.info("Done."); + } + + // Container delete + private void deleteAContainer() throws Exception { + logger.info("Last step: delete container " + containerName + " by ID."); + + // Delete container + CosmosContainerResponse containerResp = database.getContainer(containerName).delete(new CosmosContainerRequestOptions()); + logger.info("Status code for container delete: {}",containerResp.getStatusCode()); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: {}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteAContainer(); + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +} From 5f2bdec717951061e94b90445e60b4d28537349f Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 11 May 2020 07:22:41 -0700 Subject: [PATCH 105/110] Document CRUD samples --- .../azure/cosmos/examples/common/Address.java | 6 +- .../azure/cosmos/examples/common/Family.java | 14 +- .../sync/ContainerCRUDQuickstart.java | 5 +- .../sync/DocumentCRUDQuickstart.java | 341 ++++++++++++++++++ .../sync/DocumentCRUDQuickstartSync.java | 4 - 5 files changed, 354 insertions(+), 16 deletions(-) create mode 100644 src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstart.java delete mode 100644 src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstartSync.java diff --git a/src/main/java/com/azure/cosmos/examples/common/Address.java b/src/main/java/com/azure/cosmos/examples/common/Address.java index ec7d5b3..9abbf3f 100644 --- a/src/main/java/com/azure/cosmos/examples/common/Address.java +++ b/src/main/java/com/azure/cosmos/examples/common/Address.java @@ -28,8 +28,8 @@ public void setCity(String city) { this.city = city; } - private String state; - private String county; - private String city; + private String state=""; + private String county=""; + private String city=""; } diff --git a/src/main/java/com/azure/cosmos/examples/common/Family.java b/src/main/java/com/azure/cosmos/examples/common/Family.java index 4e337b3..9a3c389 100644 --- a/src/main/java/com/azure/cosmos/examples/common/Family.java +++ b/src/main/java/com/azure/cosmos/examples/common/Family.java @@ -63,12 +63,12 @@ public void setRegistered(boolean isRegistered) { this.isRegistered = isRegistered; } - private String id; - private String lastName; - private String district; - private Parent[] parents; - private Child[] children; - private Address address; - private boolean isRegistered; + private String id=""; + private String lastName=""; + 
private String district=""; + private Parent[] parents={}; + private Child[] children={}; + private Address address=new Address(); + private boolean isRegistered=false; } diff --git a/src/main/java/com/azure/cosmos/examples/containercrud/sync/ContainerCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/containercrud/sync/ContainerCRUDQuickstart.java index 0982956..771679e 100644 --- a/src/main/java/com/azure/cosmos/examples/containercrud/sync/ContainerCRUDQuickstart.java +++ b/src/main/java/com/azure/cosmos/examples/containercrud/sync/ContainerCRUDQuickstart.java @@ -91,6 +91,7 @@ private void createDatabaseIfNotExists() throws Exception { logger.info("Done."); } + // Container create private void createContainerIfNotExists() throws Exception { logger.info("Create container " + containerName + " if not exists."); @@ -98,7 +99,7 @@ private void createContainerIfNotExists() throws Exception { CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerName, "/lastName"); - // Create container with 400 RU/s + // Create container with 200 RU/s container = database.createContainerIfNotExists(containerProperties, 200).getContainer(); logger.info("Done."); @@ -143,7 +144,7 @@ private void readAllContainers() throws Exception { // Container delete private void deleteAContainer() throws Exception { - logger.info("Last step: delete container " + containerName + " by ID."); + logger.info("Delete container " + containerName + " by ID."); // Delete container CosmosContainerResponse containerResp = database.getContainer(containerName).delete(new CosmosContainerRequestOptions()); diff --git a/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstart.java new file mode 100644 index 0000000..8e0fade --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstart.java @@ -0,0 +1,341 @@ +// Copyright 
(c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.documentcrud.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.Family; +import com.azure.cosmos.implementation.http.HttpResponse; +import com.azure.cosmos.models.AccessCondition; +import com.azure.cosmos.models.AccessConditionType; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; +import com.azure.cosmos.models.CosmosContainerResponse; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import com.azure.cosmos.models.CosmosItemRequestOptions; +import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.PartitionKey; +import io.netty.handler.codec.http.HttpResponseStatus; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.UUID; + +public class DocumentCRUDQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + private final String documentId = UUID.randomUUID().toString(); + private final String documentLastName = "Witherspoon"; + + private CosmosDatabase database; + private CosmosContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to 
demonstrate the following document CRUD operations: + * -Create + * -Read by ID + * -Read all + * -Query + * -Replace + * -Upsert + * -Replace with conditional ETag check + * -Read document only if document has changed + * -Delete + */ + public static void main(String[] args) { + DocumentCRUDQuickstart p = new DocumentCRUDQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.documentCRUDDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void documentCRUDDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + createDatabaseIfNotExists(); + createContainerIfNotExists(); + + createDocument(); + readDocumentById(); + readAllDocumentsInContainer(); + queryDocuments(); + replaceDocument(); + upsertDocument(); + replaceDocumentWithConditionalEtagCheck(); + readDocumentOnlyIfChanged(); + // deleteDocument() is called at shutdown() + + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Create database if not exists + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + logger.info("Done."); + } + + // Container create + private void createContainerIfNotExists() throws Exception { + logger.info("Create container " + containerName + " if not exists."); + + // Create container if not exists + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/lastName"); + + // Create container with 200 RU/s + 
container = database.createContainerIfNotExists(containerProperties, 200).getContainer(); + + logger.info("Done."); + } + + private void createDocument() throws Exception { + logger.info("Create document " + documentId); + + // Define a document as a POJO (internally this + // is converted to JSON via custom serialization) + Family family = new Family(); + family.setLastName(documentLastName); + family.setId(documentId); + + // Insert this item as a document + // Explicitly specifying the /pk value improves performance. + container.createItem(family,new PartitionKey(family.getLastName()),new CosmosItemRequestOptions()); + + logger.info("Done."); + } + + // Document read + private void readDocumentById() throws Exception { + logger.info("Read document " + documentId + " by ID."); + + // Read document by ID + Family family = container.readItem(documentId,new PartitionKey(documentLastName),Family.class).getResource(); + + // Check result + logger.info("Finished reading family " + family.getId() + " with partition key " + family.getLastName()); + + logger.info("Done."); + } + + // Container read all + private void readAllDocumentsInContainer() throws Exception { + logger.info("Read all documents in container " + containerName + "."); + + // Read all documents in the container + CosmosPagedIterable families = container.readAllItems(new FeedOptions(),Family.class); + + // Print + String msg="Listing documents in container:\n"; + for(Family family : families) { + msg += String.format("-Family (/id,partition key)): (%s,%s)\n",family.getId(),family.getLastName()); + } + logger.info(msg + "\n"); + + logger.info("Done."); + } + + private void queryDocuments() throws Exception { + logger.info("Query documents in the container " + containerName + "."); + + String sql = "SELECT * FROM c WHERE c.lastName = 'Witherspoon'"; + + CosmosPagedIterable filteredFamilies = container.queryItems(sql, new FeedOptions(), Family.class); + + // Print + if (filteredFamilies.iterator().hasNext()) 
{ + Family family = filteredFamilies.iterator().next(); + logger.info("First query result: Family with (/id, partition key) = (%s,%s)",family.getId(),family.getLastName()); + } + + logger.info("Done."); + } + + private void replaceDocument() throws Exception { + logger.info("Replace document " + documentId); + + // Replace existing document with new modified document + Family family = new Family(); + family.setLastName(documentLastName); + family.setId(documentId); + family.setDistrict("Columbia"); // Document modification + + CosmosItemResponse famResp = + container.replaceItem(family, family.getId(), new PartitionKey(family.getLastName()), new CosmosItemRequestOptions()); + + logger.info("Request charge of replace operation: {} RU", famResp.getRequestCharge()); + + logger.info("Done."); + } + + private void upsertDocument() throws Exception { + logger.info("Replace document " + documentId); + + // Replace existing document with new modified document (contingent on modification). + Family family = new Family(); + family.setLastName(documentLastName); + family.setId(documentId); + family.setDistrict("Columbia"); // Document modification + + CosmosItemResponse famResp = + container.upsertItem(family, new CosmosItemRequestOptions()); + + logger.info("Done."); + } + + private void replaceDocumentWithConditionalEtagCheck() throws Exception { + logger.info("Replace document " + documentId + ", employing optimistic concurrency using ETag."); + + // Obtained current document ETag + CosmosItemResponse famResp = + container.readItem(documentId, new PartitionKey(documentLastName), Family.class); + String etag = famResp.getResponseHeaders().get("etag"); + + logger.info("Read document " + documentId + " to obtain current ETag: " + etag); + + // Modify document + Family family = famResp.getResource(); + family.setRegistered(!family.isRegistered()); + + // Persist the change back to the server, updating the ETag in the process + // This models a concurrent change made to the 
document + CosmosItemResponse updatedFamResp = + container.replaceItem(family,family.getId(),new PartitionKey(family.getLastName()),new CosmosItemRequestOptions()); + logger.info("'Concurrent' update to document " + documentId + " so ETag is now " + updatedFamResp.getResponseHeaders().get("etag")); + + // Now update the document and call replace with the AccessCondition requiring that ETag has not changed. + // This should fail because the "concurrent" document change updated the ETag. + try { + AccessCondition ac = new AccessCondition(); + ac.setType(AccessConditionType.IF_MATCH); + ac.setCondition(etag); + + CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); + requestOptions.setAccessCondition(ac); + + family.setDistrict("Seafood"); + + CosmosItemResponse failedFamResp = + container.replaceItem(family,family.getId(),new PartitionKey(family.getLastName()),requestOptions); + + } catch (CosmosClientException cce) { + logger.info("As expected, we have a pre-condition failure exception\n"); + } + + logger.info("Done."); + } + + private void readDocumentOnlyIfChanged() throws Exception { + logger.info("Read document " + documentId + " only if it has been changed, utilizing an ETag check."); + + // Read document + CosmosItemResponse famResp = + container.readItem(documentId, new PartitionKey(documentLastName), Family.class); + logger.info("Read doc with status code of {}", famResp.getStatusCode()); + + // Re-read but with conditional access requirement that ETag has changed. + // This should fail. 
+ +        String etag = famResp.getResponseHeaders().get("etag"); +        AccessCondition ac = new AccessCondition(); +        ac.setType(AccessConditionType.IF_NONE_MATCH); +        ac.setCondition(etag); +        CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); +        requestOptions.setAccessCondition(ac); + +        CosmosItemResponse failResp = +                container.readItem(documentId, new PartitionKey(documentLastName), requestOptions, Family.class); + +        logger.info("Re-read doc with status code of {} (we anticipate failure due to ETag not having changed.)", failResp.getStatusCode()); + +        // Replace the doc with a modified version, which will update ETag +        Family family = famResp.getResource(); +        family.setRegistered(!family.isRegistered()); +        CosmosItemResponse failedFamResp = +                container.replaceItem(family,family.getId(),new PartitionKey(family.getLastName()),new CosmosItemRequestOptions()); +        logger.info("Modified and replaced the doc (updates ETag.)"); + +        // Re-read doc again, with conditional access requirements. +        // This should succeed since ETag has been updated. 
+ CosmosItemResponse succeedResp = + container.readItem(documentId, new PartitionKey(documentLastName), requestOptions, Family.class); + logger.info("Re-read doc with status code of {} (we anticipate success due to ETag modification.)", succeedResp.getStatusCode()); + + logger.info("Done."); + } + + // Document delete + private void deleteADocument() throws Exception { + logger.info("Delete document " + documentId + " by ID."); + + // Delete document + container.deleteItem(documentId, new PartitionKey(documentLastName), new CosmosItemRequestOptions()); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: {}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteADocument(); + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstartSync.java b/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstartSync.java deleted file mode 100644 index 59de052..0000000 --- a/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstartSync.java +++ /dev/null @@ -1,4 +0,0 @@ -package com.azure.cosmos.examples.documentcrud.sync; - -public class DocumentCRUDQuickstartSync { -} From 876552f80d4b854f7b549360195c97f0b12e64a9 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Mon, 11 May 2020 07:31:51 -0700 Subject: [PATCH 106/110] SOW on queries --- .../sync/DocumentCRUDQuickstart.java | 4 - .../queries/sync/QueriesQuickstart.java | 346 ++++++++++++++++++ .../queries/sync/QueriesQuickstartSync.java | 4 - 3 files changed, 346 insertions(+), 8 deletions(-) create mode 100644 src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstart.java delete mode 100644 src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstartSync.java diff --git a/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstart.java index 8e0fade..768956d 100644 --- a/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstart.java +++ b/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstart.java @@ -13,19 +13,15 @@ import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; import com.azure.cosmos.examples.common.AccountSettings; import com.azure.cosmos.examples.common.Family; -import com.azure.cosmos.implementation.http.HttpResponse; import com.azure.cosmos.models.AccessCondition; import com.azure.cosmos.models.AccessConditionType; import com.azure.cosmos.models.CosmosContainerProperties; -import 
com.azure.cosmos.models.CosmosContainerRequestOptions; -import com.azure.cosmos.models.CosmosContainerResponse; import com.azure.cosmos.models.CosmosDatabaseRequestOptions; import com.azure.cosmos.models.CosmosDatabaseResponse; import com.azure.cosmos.models.CosmosItemRequestOptions; import com.azure.cosmos.models.CosmosItemResponse; import com.azure.cosmos.models.FeedOptions; import com.azure.cosmos.models.PartitionKey; -import io.netty.handler.codec.http.HttpResponseStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstart.java b/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstart.java new file mode 100644 index 0000000..ea637a3 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstart.java @@ -0,0 +1,346 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.queries.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.Family; +import com.azure.cosmos.implementation.http.HttpResponse; +import com.azure.cosmos.models.AccessCondition; +import com.azure.cosmos.models.AccessConditionType; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; +import com.azure.cosmos.models.CosmosContainerResponse; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import 
com.azure.cosmos.models.CosmosItemRequestOptions; +import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.PartitionKey; +import io.netty.handler.codec.http.HttpResponseStatus; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.UUID; + +public class QueriesQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + private final String documentId = UUID.randomUUID().toString(); + private final String documentLastName = "Witherspoon"; + + private CosmosDatabase database; + private CosmosContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to demonstrate Azure Cosmos DB queries via Java SQL API, including queries for: + * -All documents + * -Equality using == + * -Inequality using != and NOT + * -Using range operators like >, <, >=, <= + * -Using range operators against Strings + * -With ORDER BY + * -With aggregate functions + * -With subdocuments + * -With intra-document joins + * -With String, math and array operators + * -With parameterized SQL using SqlQuerySpec + * -With explicit paging + * -Query partitioned collections in parallel + * -With ORDER BY for partitioned collections + */ + public static void main(String[] args) { + QueriesQuickstart p = new QueriesQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.queriesDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void queriesDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + 
AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + createDatabaseIfNotExists(); + createContainerIfNotExists(); + + createDocument(); + readDocumentById(); + readAllDocumentsInContainer(); + queryDocuments(); + replaceDocument(); + upsertDocument(); + replaceDocumentWithConditionalEtagCheck(); + readDocumentOnlyIfChanged(); + // deleteDocument() is called at shutdown() + + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Create database if not exists + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + logger.info("Done."); + } + + // Container create + private void createContainerIfNotExists() throws Exception { + logger.info("Create container " + containerName + " if not exists."); + + // Create container if not exists + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/lastName"); + + // Create container with 200 RU/s + container = database.createContainerIfNotExists(containerProperties, 200).getContainer(); + + logger.info("Done."); + } + + private void createDocument() throws Exception { + logger.info("Create document " + documentId); + + // Define a document as a POJO (internally this + // is converted to JSON via custom serialization) + Family family = new Family(); + family.setLastName(documentLastName); + family.setId(documentId); + + // Insert this item as a document + // Explicitly specifying the /pk value improves performance. 
+ container.createItem(family,new PartitionKey(family.getLastName()),new CosmosItemRequestOptions()); + + logger.info("Done."); + } + + // Document read + private void readDocumentById() throws Exception { + logger.info("Read document " + documentId + " by ID."); + + // Read document by ID + Family family = container.readItem(documentId,new PartitionKey(documentLastName),Family.class).getResource(); + + // Check result + logger.info("Finished reading family " + family.getId() + " with partition key " + family.getLastName()); + + logger.info("Done."); + } + + // Container read all + private void readAllDocumentsInContainer() throws Exception { + logger.info("Read all documents in container " + containerName + "."); + + // Read all documents in the container + CosmosPagedIterable families = container.readAllItems(new FeedOptions(),Family.class); + + // Print + String msg="Listing documents in container:\n"; + for(Family family : families) { + msg += String.format("-Family (/id,partition key)): (%s,%s)\n",family.getId(),family.getLastName()); + } + logger.info(msg + "\n"); + + logger.info("Done."); + } + + private void queryDocuments() throws Exception { + logger.info("Query documents in the container " + containerName + "."); + + String sql = "SELECT * FROM c WHERE c.lastName = 'Witherspoon'"; + + CosmosPagedIterable filteredFamilies = container.queryItems(sql, new FeedOptions(), Family.class); + + // Print + if (filteredFamilies.iterator().hasNext()) { + Family family = filteredFamilies.iterator().next(); + logger.info("First query result: Family with (/id, partition key) = (%s,%s)",family.getId(),family.getLastName()); + } + + logger.info("Done."); + } + + private void replaceDocument() throws Exception { + logger.info("Replace document " + documentId); + + // Replace existing document with new modified document + Family family = new Family(); + family.setLastName(documentLastName); + family.setId(documentId); + family.setDistrict("Columbia"); // Document 
modification + + CosmosItemResponse famResp = + container.replaceItem(family, family.getId(), new PartitionKey(family.getLastName()), new CosmosItemRequestOptions()); + + logger.info("Request charge of replace operation: {} RU", famResp.getRequestCharge()); + + logger.info("Done."); + } + + private void upsertDocument() throws Exception { + logger.info("Replace document " + documentId); + + // Replace existing document with new modified document (contingent on modification). + Family family = new Family(); + family.setLastName(documentLastName); + family.setId(documentId); + family.setDistrict("Columbia"); // Document modification + + CosmosItemResponse famResp = + container.upsertItem(family, new CosmosItemRequestOptions()); + + logger.info("Done."); + } + + private void replaceDocumentWithConditionalEtagCheck() throws Exception { + logger.info("Replace document " + documentId + ", employing optimistic concurrency using ETag."); + + // Obtained current document ETag + CosmosItemResponse famResp = + container.readItem(documentId, new PartitionKey(documentLastName), Family.class); + String etag = famResp.getResponseHeaders().get("etag"); + + logger.info("Read document " + documentId + " to obtain current ETag: " + etag); + + // Modify document + Family family = famResp.getResource(); + family.setRegistered(!family.isRegistered()); + + // Persist the change back to the server, updating the ETag in the process + // This models a concurrent change made to the document + CosmosItemResponse updatedFamResp = + container.replaceItem(family,family.getId(),new PartitionKey(family.getLastName()),new CosmosItemRequestOptions()); + logger.info("'Concurrent' update to document " + documentId + " so ETag is now " + updatedFamResp.getResponseHeaders().get("etag")); + + // Now update the document and call replace with the AccessCondition requiring that ETag has not changed. + // This should fail because the "concurrent" document change updated the ETag. 
+ try { + AccessCondition ac = new AccessCondition(); + ac.setType(AccessConditionType.IF_MATCH); + ac.setCondition(etag); + + CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); + requestOptions.setAccessCondition(ac); + + family.setDistrict("Seafood"); + + CosmosItemResponse failedFamResp = + container.replaceItem(family,family.getId(),new PartitionKey(family.getLastName()),requestOptions); + + } catch (CosmosClientException cce) { + logger.info("As expected, we have a pre-condition failure exception\n"); + } + + logger.info("Done."); + } + + private void readDocumentOnlyIfChanged() throws Exception { + logger.info("Read document " + documentId + " only if it has been changed, utilizing an ETag check."); + + // Read document + CosmosItemResponse famResp = + container.readItem(documentId, new PartitionKey(documentLastName), Family.class); + logger.info("Read doc with status code of {}", famResp.getStatusCode()); + + // Re-read but with conditional access requirement that ETag has changed. + // This should fail. 
+ + String etag = famResp.getResponseHeaders().get("etag"); + AccessCondition ac = new AccessCondition(); + ac.setType(AccessConditionType.IF_NONE_MATCH); + ac.setCondition(etag); + CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); + requestOptions.setAccessCondition(ac); + + CosmosItemResponse failResp = + container.readItem(documentId, new PartitionKey(documentLastName), requestOptions, Family.class); + + logger.info("Re-read doc with status code of {} (we anticipate failure due to ETag not having changed.)", failResp.getStatusCode()); + + // Replace the doc with a modified version, which will update ETag + Family family = famResp.getResource(); + family.setRegistered(!family.isRegistered()); + CosmosItemResponse failedFamResp = + container.replaceItem(family,family.getId(),new PartitionKey(family.getLastName()),new CosmosItemRequestOptions()); + logger.info("Modified and replaced the doc (updates ETag.)"); + + // Re-read doc again, with conditional acccess requirements. + // This should succeed since ETag has been updated. 
+ CosmosItemResponse succeedResp = + container.readItem(documentId, new PartitionKey(documentLastName), requestOptions, Family.class); + logger.info("Re-read doc with status code of {} (we anticipate success due to ETag modification.)", succeedResp.getStatusCode()); + + logger.info("Done."); + } + + // Document delete + private void deleteADocument() throws Exception { + logger.info("Delete document " + documentId + " by ID."); + + // Delete document + container.deleteItem(documentId, new PartitionKey(documentLastName), new CosmosItemRequestOptions()); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: {}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteADocument(); + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstartSync.java b/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstartSync.java deleted file mode 100644 index fff294d..0000000 --- a/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstartSync.java +++ /dev/null @@ -1,4 +0,0 @@ -package com.azure.cosmos.examples.queries.sync; - -public class QueriesQuickstartSync { -} From e3ac8c25c573a761cb867ee62ef19078a494914f Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 15 May 2020 04:55:48 -0700 Subject: [PATCH 107/110] Query samples --- .../queries/sync/QueriesQuickstart.java | 336 ++++++++++++------ 1 file changed, 218 insertions(+), 118 deletions(-) diff --git a/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstart.java b/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstart.java index ea637a3..15cf790 100644 --- a/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstart.java +++ b/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstart.java @@ -24,11 +24,17 @@ import com.azure.cosmos.models.CosmosItemRequestOptions; import com.azure.cosmos.models.CosmosItemResponse; import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.FeedResponse; import com.azure.cosmos.models.PartitionKey; +import com.azure.cosmos.models.SqlParameter; +import com.azure.cosmos.models.SqlParameterList; +import com.azure.cosmos.models.SqlQuerySpec; +import com.fasterxml.jackson.databind.JsonNode; import io.netty.handler.codec.http.HttpResponseStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Iterator; import java.util.UUID; public class QueriesQuickstart { @@ -98,17 +104,53 @@ private void queriesDemo() throws Exception { createContainerIfNotExists(); createDocument(); - readDocumentById(); - 
readAllDocumentsInContainer(); - queryDocuments(); - replaceDocument(); - upsertDocument(); - replaceDocumentWithConditionalEtagCheck(); - readDocumentOnlyIfChanged(); + + queryAllDocuments(); + queryWithPagingAndContinuationTokenAndPrintQueryCharge(new FeedOptions()); + queryEquality(); + queryInequality(); + queryRange(); + queryRangeAgainstStrings(); + queryOrderBy(); + queryWithAggregateFunctions(); + querySubdocuments(); + queryIntraDocumentJoin(); + queryStringMathAndArrayOperators(); + queryWithQuerySpec(); + parallelQueryWithPagingAndContinuationTokenAndPrintQueryCharge(); + // deleteDocument() is called at shutdown() } + private void executeQueryPrintSingleResult(String sql) { + logger.info("Execute query {}",sql); + + CosmosPagedIterable filteredFamilies = container.queryItems(sql, new FeedOptions(), Family.class); + + // Print + if (filteredFamilies.iterator().hasNext()) { + Family family = filteredFamilies.iterator().next(); + logger.info("First query result: Family with (/id, partition key) = (%s,%s)",family.getId(),family.getLastName()); + } + + logger.info("Done."); + } + + private void executeQueryWithQuerySpecPrintSingleResult(SqlQuerySpec querySpec) { + logger.info("Execute query {}",querySpec.getQueryText()); + + CosmosPagedIterable filteredFamilies = container.queryItems(querySpec, new FeedOptions(), Family.class); + + // Print + if (filteredFamilies.iterator().hasNext()) { + Family family = filteredFamilies.iterator().next(); + logger.info("First query result: Family with (/id, partition key) = (%s,%s)",family.getId(),family.getLastName()); + } + + logger.info("Done."); + } + // Database Create private void createDatabaseIfNotExists() throws Exception { logger.info("Create database " + databaseName + " if not exists..."); @@ -149,163 +191,221 @@ private void createDocument() throws Exception { logger.info("Done."); } - // Document read - private void readDocumentById() throws Exception { - logger.info("Read document " + documentId + " by ID."); 
+ private void queryAllDocuments() throws Exception { + logger.info("Query all documents."); - // Read document by ID - Family family = container.readItem(documentId,new PartitionKey(documentLastName),Family.class).getResource(); + executeQueryPrintSingleResult("SELECT * FROM c"); + } - // Check result - logger.info("Finished reading family " + family.getId() + " with partition key " + family.getLastName()); + private void queryWithPagingAndContinuationTokenAndPrintQueryCharge(FeedOptions options) throws Exception { + logger.info("Query with paging and continuation token; print the total RU charge of the query"); - logger.info("Done."); - } + String query = "SELECT * FROM Families"; - // Container read all - private void readAllDocumentsInContainer() throws Exception { - logger.info("Read all documents in container " + containerName + "."); + int pageSize = 100; //No of docs per page + int currentPageNumber = 1; + int documentNumber = 0; + String continuationToken = null; - // Read all documents in the container - CosmosPagedIterable families = container.readAllItems(new FeedOptions(),Family.class); + double requestCharge = 0.0; - // Print - String msg="Listing documents in container:\n"; - for(Family family : families) { - msg += String.format("-Family (/id,partition key)): (%s,%s)\n",family.getId(),family.getLastName()); - } - logger.info(msg + "\n"); + // First iteration (continuationToken == null): Receive a batch of query response pages + // Subsequent iterations (continuationToken != null): Receive subsequent batch of query response pages, with continuationToken indicating where the previous iteration left off + do { + logger.info("Receiving a set of query response pages."); + logger.info("Continuation Token: " + continuationToken + "\n"); - logger.info("Done."); + FeedOptions queryOptions = new FeedOptions(); + + // note that setMaxItemCount sets the number of items to return in a single page result + queryOptions.setMaxItemCount(pageSize); + 
queryOptions.setRequestContinuation(continuationToken); + + Iterable> feedResponseIterator = + container.queryItems(query, queryOptions, Family.class).iterableByPage(); + + for (FeedResponse page : feedResponseIterator) { + logger.info(String.format("Current page number: %d", currentPageNumber)); + // Access all of the documents in this result page + for (Family docProps : page.getResults()) { + documentNumber++; + } + + // Accumulate the request charge of this page + requestCharge += page.getRequestCharge(); + + // Page count so far + logger.info(String.format("Total documents received so far: %d", documentNumber)); + + // Request charge so far + logger.info(String.format("Total request charge so far: %f\n", requestCharge)); + + // Along with page results, get a continuation token + // which enables the client to "pick up where it left off" + // in accessing query response pages. + continuationToken = page.getContinuationToken(); + + currentPageNumber++; + } + + } while (continuationToken != null); + + logger.info(String.format("Total request charge: %f\n", requestCharge)); } - private void queryDocuments() throws Exception { - logger.info("Query documents in the container " + containerName + "."); + private void parallelQueryWithPagingAndContinuationTokenAndPrintQueryCharge() throws Exception { + logger.info("Parallel implementation of:"); - String sql = "SELECT * FROM c WHERE c.lastName = 'Witherspoon'"; + FeedOptions options = new FeedOptions(); - CosmosPagedIterable filteredFamilies = container.queryItems(sql, new FeedOptions(), Family.class); + // 0 maximum parallel tasks, effectively serial execution + options.setMaxDegreeOfParallelism(0); + options.setMaxBufferedItemCount(100); + queryWithPagingAndContinuationTokenAndPrintQueryCharge(options); - // Print - if (filteredFamilies.iterator().hasNext()) { - Family family = filteredFamilies.iterator().next(); - logger.info("First query result: Family with (/id, partition key) = 
(%s,%s)",family.getId(),family.getLastName()); - } + // 1 maximum parallel tasks, 1 dedicated asynchronous task to continuously make REST calls + options.setMaxDegreeOfParallelism(1); + options.setMaxBufferedItemCount(100); + queryWithPagingAndContinuationTokenAndPrintQueryCharge(options); - logger.info("Done."); + // 10 maximum parallel tasks, a maximum of 10 dedicated asynchronous tasks to continuously make REST calls + options.setMaxDegreeOfParallelism(10); + options.setMaxBufferedItemCount(100); + queryWithPagingAndContinuationTokenAndPrintQueryCharge(options); + + logger.info("Done with parallel queries."); } - private void replaceDocument() throws Exception { - logger.info("Replace document " + documentId); + private void queryEquality() throws Exception { + logger.info("Query for equality using =="); - // Replace existing document with new modified document - Family family = new Family(); - family.setLastName(documentLastName); - family.setId(documentId); - family.setDistrict("Columbia"); // Document modification + executeQueryPrintSingleResult("SELECT * FROM c WHERE c.id == '" + documentId + "'"); + } - CosmosItemResponse famResp = - container.replaceItem(family, family.getId(), new PartitionKey(family.getLastName()), new CosmosItemRequestOptions()); + private void queryInequality() throws Exception { + logger.info("Query for inequality"); - logger.info("Request charge of replace operation: {} RU", famResp.getRequestCharge()); + executeQueryPrintSingleResult("SELECT * FROM c WHERE c.id != '" + documentId + "'"); + executeQueryPrintSingleResult("SELECT * FROM c WHERE c.id <> '" + documentId + "'"); - logger.info("Done."); + // Combine equality and inequality + executeQueryPrintSingleResult("SELECT * FROM c WHERE c.lastName == '" + documentLastName + "' && c.id != '" + documentId + "'"); } - private void upsertDocument() throws Exception { - logger.info("Replace document " + documentId); + private void queryRange() throws Exception { + logger.info("Numerical 
range query"); - // Replace existing document with new modified document (contingent on modification). - Family family = new Family(); - family.setLastName(documentLastName); - family.setId(documentId); - family.setDistrict("Columbia"); // Document modification + // Numerical range query + executeQueryPrintSingleResult("SELECT * FROM Families f WHERE f.Children[0].Grade > 5"); + } - CosmosItemResponse famResp = - container.upsertItem(family, new CosmosItemRequestOptions()); + private void queryRangeAgainstStrings() throws Exception { + logger.info("String range query"); - logger.info("Done."); + // String range query + executeQueryPrintSingleResult("SELECT * FROM Families f WHERE f.Address.State > 'NY'"); } - private void replaceDocumentWithConditionalEtagCheck() throws Exception { - logger.info("Replace document " + documentId + ", employing optimistic concurrency using ETag."); + private void queryOrderBy() throws Exception { + logger.info("ORDER BY queries"); - // Obtained current document ETag - CosmosItemResponse famResp = - container.readItem(documentId, new PartitionKey(documentLastName), Family.class); - String etag = famResp.getResponseHeaders().get("etag"); + // Numerical ORDER BY + executeQueryPrintSingleResult("SELECT * FROM Families f WHERE f.LastName = 'Andersen' ORDER BY f.Children[0].Grade"); + } - logger.info("Read document " + documentId + " to obtain current ETag: " + etag); + private void queryDistinct() throws Exception { + logger.info("DISTINCT queries"); - // Modify document - Family family = famResp.getResource(); - family.setRegistered(!family.isRegistered()); + // DISTINCT query + executeQueryPrintSingleResult("SELECT DISTINCT c.lastName from c"); + } - // Persist the change back to the server, updating the ETag in the process - // This models a concurrent change made to the document - CosmosItemResponse updatedFamResp = - container.replaceItem(family,family.getId(),new PartitionKey(family.getLastName()),new CosmosItemRequestOptions()); - 
logger.info("'Concurrent' update to document " + documentId + " so ETag is now " + updatedFamResp.getResponseHeaders().get("etag")); + private void queryWithAggregateFunctions() throws Exception { + logger.info("Aggregate function queries"); - // Now update the document and call replace with the AccessCondition requiring that ETag has not changed. - // This should fail because the "concurrent" document change updated the ETag. - try { - AccessCondition ac = new AccessCondition(); - ac.setType(AccessConditionType.IF_MATCH); - ac.setCondition(etag); + // Basic query with aggregate functions + executeQueryPrintSingleResult("SELECT VALUE COUNT(f) FROM Families f WHERE f.LastName = 'Andersen'"); - CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); - requestOptions.setAccessCondition(ac); + // Query with aggregate functions within documents + executeQueryPrintSingleResult("SELECT VALUE COUNT(child) FROM child IN f.Children"); + } - family.setDistrict("Seafood"); + private void querySubdocuments() throws Exception { + // Cosmos DB supports the selection of sub-documents on the server, there + // is no need to send down the full family record if all you want to display + // is a single child - CosmosItemResponse failedFamResp = - container.replaceItem(family,family.getId(),new PartitionKey(family.getLastName()),requestOptions); + logger.info("Subdocument query"); - } catch (CosmosClientException cce) { - logger.info("As expected, we have a pre-condition failure exception\n"); - } + executeQueryPrintSingleResult("SELECT VALUE c FROM c IN f.Children"); + } - logger.info("Done."); + private void queryIntraDocumentJoin() throws Exception { + // Cosmos DB supports the notion of an Intra-document Join, or a self-join + // which will effectively flatten the hierarchy of a document, just like doing + // a self JOIN on a SQL table + + logger.info("Intra-document joins"); + + // Single join + executeQueryPrintSingleResult("SELECT f.id FROM Families f JOIN c IN 
f.Children"); + + // Two joins + executeQueryPrintSingleResult("SELECT f.id as family, c.FirstName AS child, p.GivenName AS pet " + + "FROM Families f " + + "JOIN c IN f.Children " + + "join p IN c.Pets"); + + // Two joins and a filter + executeQueryPrintSingleResult("SELECT f.id as family, c.FirstName AS child, p.GivenName AS pet " + + "FROM Families f " + + "JOIN c IN f.Children " + + "join p IN c.Pets " + + "WHERE p.GivenName = 'Fluffy'"); } - private void readDocumentOnlyIfChanged() throws Exception { - logger.info("Read document " + documentId + " only if it has been changed, utilizing an ETag check."); + private void queryStringMathAndArrayOperators() throws Exception { + logger.info("Queries with string, math and array operators"); - // Read document - CosmosItemResponse famResp = - container.readItem(documentId, new PartitionKey(documentLastName), Family.class); - logger.info("Read doc with status code of {}", famResp.getStatusCode()); + // String STARTSWITH operator + executeQueryPrintSingleResult("SELECT * FROM family WHERE STARTSWITH(family.LastName, 'An')"); - // Re-read but with conditional access requirement that ETag has changed. - // This should fail. 
+ // Round down numbers with FLOOR + executeQueryPrintSingleResult("SELECT VALUE FLOOR(family.Children[0].Grade) FROM family"); - String etag = famResp.getResponseHeaders().get("etag"); - AccessCondition ac = new AccessCondition(); - ac.setType(AccessConditionType.IF_NONE_MATCH); - ac.setCondition(etag); - CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); - requestOptions.setAccessCondition(ac); + // Get number of children using array length + executeQueryPrintSingleResult("SELECT VALUE ARRAY_LENGTH(family.Children) FROM family"); + } - CosmosItemResponse failResp = - container.readItem(documentId, new PartitionKey(documentLastName), requestOptions, Family.class); + private void queryWithQuerySpec() throws Exception { + logger.info("Query with SqlQuerySpec"); - logger.info("Re-read doc with status code of {} (we anticipate failure due to ETag not having changed.)", failResp.getStatusCode()); + FeedOptions options = new FeedOptions(); + options.setPartitionKey(new PartitionKey("Witherspoon")); - // Replace the doc with a modified version, which will update ETag - Family family = famResp.getResource(); - family.setRegistered(!family.isRegistered()); - CosmosItemResponse failedFamResp = - container.replaceItem(family,family.getId(),new PartitionKey(family.getLastName()),new CosmosItemRequestOptions()); - logger.info("Modified and replaced the doc (updates ETag.)"); + // Simple query with a single property equality comparison + // in SQL with SQL parameterization instead of inlining the + // parameter values in the query string - // Re-read doc again, with conditional acccess requirements. - // This should succeed since ETag has been updated. 
- CosmosItemResponse succeedResp = - container.readItem(documentId, new PartitionKey(documentLastName), requestOptions, Family.class); - logger.info("Re-read doc with status code of {} (we anticipate success due to ETag modification.)", succeedResp.getStatusCode()); + SqlParameterList paramList = new SqlParameterList(); + paramList.add(new SqlParameter("@id", "AndersenFamily")); + SqlQuerySpec querySpec = new SqlQuerySpec( + "SELECT * FROM Families f WHERE (f.id = @id)", + paramList); - logger.info("Done."); + executeQueryWithQuerySpecPrintSingleResult(querySpec); + + // Query using two properties within each document. WHERE Id == "" AND Address.City == "" + // notice here how we are doing an equality comparison on the string value of City + + paramList = new SqlParameterList(); + paramList.add(new SqlParameter("@id", "AndersenFamily")); + paramList.add(new SqlParameter("@city", "Seattle")); + querySpec = new SqlQuerySpec( + "SELECT * FROM Families f WHERE f.id = @id AND f.Address.City = @city", + paramList); + + executeQueryWithQuerySpecPrintSingleResult(querySpec); } // Document delete From 5ff3b6e3f17d80d1f86409ba119228bfc112f6e5 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 15 May 2020 05:08:01 -0700 Subject: [PATCH 108/110] Autoscale container CRUD demo --- .../AutoscaleContainerCRUDQuickstart.java | 195 ++++++++++++++++++ 1 file changed, 195 insertions(+) create mode 100644 src/main/java/com/azure/cosmos/examples/autoscalecontainercrud/sync/AutoscaleContainerCRUDQuickstart.java diff --git a/src/main/java/com/azure/cosmos/examples/autoscalecontainercrud/sync/AutoscaleContainerCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/autoscalecontainercrud/sync/AutoscaleContainerCRUDQuickstart.java new file mode 100644 index 0000000..8ffcb2d --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/autoscalecontainercrud/sync/AutoscaleContainerCRUDQuickstart.java @@ -0,0 +1,195 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +package com.azure.cosmos.examples.autoscalecontainercrud.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; +import com.azure.cosmos.models.CosmosContainerResponse; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import com.azure.cosmos.models.FeedOptions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class AutoscaleContainerCRUDQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + + private CosmosDatabase database; + private CosmosContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to demonstrate the following AUTOSCALE container CRUD operations: + * -Create + * -Update throughput + * -Read by ID + * -Read all + * -Delete + */ + public static void main(String[] args) { + AutoscaleContainerCRUDQuickstart p = new AutoscaleContainerCRUDQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.autoscaleContainerCRUDDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void 
autoscaleContainerCRUDDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + createDatabaseIfNotExists(); + createContainerIfNotExists(); + + readContainerById(); + readAllContainers(); + // deleteAContainer() is called at shutdown() + + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Create database if not exists + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + logger.info("Done."); + } + + // Container create + private void createContainerIfNotExists() throws Exception { + logger.info("Create autoscale container " + containerName + " if not exists."); + + // Container and autoscale throughput settings + CosmosContainerProperties autoscaleContainerProperties = new CosmosContainerProperties(containerName, "/lastName"); + ThroughputProperties autoscaleThroughputProperties = ThroughputProperties.createAutoscaledThroughput(200); //Set autoscale max RU/s + + // Create the container with autoscale enabled + container = database.createContainer(autoscaleContainerProperties, autoscaleThroughputProperties, + new CosmosContainerRequestOptions()).getContainer(); + + logger.info("Done."); + } + + // Update container throughput + private void updateContainerThroughput() throws Exception { + logger.info("Update autoscale max throughput for container " + containerName + "."); + + // Change the autoscale max throughput (RU/s) + container.replaceThroughput(ThroughputProperties.createAutoscaledThroughput(400)); + + logger.info("Done."); + } + + private void readContainerThroughput() throws Exception { + // Read the throughput on a resource + ThroughputProperties 
autoscaleContainerThroughput = container.readThroughput().getProperties(); + + // The autoscale max throughput (RU/s) of the resource + int autoscaleMaxThroughput = autoscaleContainerThroughput.getAutoscaleMaxThroughput(); + + // The throughput (RU/s) the resource is currently scaled to + int currentThroughput = autoscaleContainerThroughput.Throughput; + + logger.info("Autoscale max throughput: {} current throughput: {}",autoscaleMaxThroughput,currentThroughput); + } + + // Container read + private void readContainerById() throws Exception { + logger.info("Read container " + containerName + " by ID."); + + // Read container by ID + container = database.getContainer(containerName); + + logger.info("Done."); + } + + // Container read all + private void readAllContainers() throws Exception { + logger.info("Read all containers in database " + databaseName + "."); + + // Read all containers in the account + CosmosPagedIterable containers = database.readAllContainers(new FeedOptions()); + + // Print + String msg="Listing containers in database:\n"; + for(CosmosContainerProperties containerProps : containers) { + msg += String.format("-Container ID: %s\n",containerProps.getId()); + } + logger.info(msg + "\n"); + + logger.info("Done."); + } + + // Container delete + private void deleteAContainer() throws Exception { + logger.info("Delete container " + containerName + " by ID."); + + // Delete container + CosmosContainerResponse containerResp = database.getContainer(containerName).delete(new CosmosContainerRequestOptions()); + logger.info("Status code for container delete: {}",containerResp.getStatusCode()); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: 
{}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteAContainer(); + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +} From 778409ef528d49a327e7430936a4016394a7dd44 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 15 May 2020 05:10:46 -0700 Subject: [PATCH 109/110] Added autoscale database CRUD demo --- .../sync/AutoscaleDatabaseCRUDQuickstart.java | 140 ++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 src/main/java/com/azure/cosmos/examples/autoscaledatabasecrud/sync/AutoscaleDatabaseCRUDQuickstart.java diff --git a/src/main/java/com/azure/cosmos/examples/autoscaledatabasecrud/sync/AutoscaleDatabaseCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/autoscaledatabasecrud/sync/AutoscaleDatabaseCRUDQuickstart.java new file mode 100644 index 0000000..602913d --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/autoscaledatabasecrud/sync/AutoscaleDatabaseCRUDQuickstart.java @@ -0,0 +1,140 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.examples.autoscaledatabasecrud.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.models.CosmosDatabaseProperties; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import com.azure.cosmos.models.FeedOptions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class AutoscaleDatabaseCRUDQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + + private CosmosDatabase database; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to demonstrate the following AUTOSCALE database CRUD operations: + * -Create + * -Read by ID + * -Read all + * -Delete + */ + public static void main(String[] args) { + AutoscaleDatabaseCRUDQuickstart p = new AutoscaleDatabaseCRUDQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.autoscaleDatabaseCRUDDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void autoscaleDatabaseCRUDDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + 
createDatabaseIfNotExists(); + readDatabaseById(); + readAllDatabases(); + // deleteADatabase() is called at shutdown() + + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Autoscale throughput settings + ThroughputProperties autoscaleThroughputProperties = ThroughputProperties.createAutoscaledThroughput(400); //Set autoscale max RU/s + + //Create the database with autoscale enabled + CosmosDatabase database = client.createDatabase(databaseName, autoscaleThroughputProperties).getDatabase(); + + logger.info("Done."); + } + + // Database read + private void readDatabaseById() throws Exception { + logger.info("Read database " + databaseName + " by ID."); + + // Read database by ID + database = client.getDatabase(databaseName); + + logger.info("Done."); + } + + // Database read all + private void readAllDatabases() throws Exception { + logger.info("Read all databases in the account."); + + // Read all databases in the account + CosmosPagedIterable databases = client.readAllDatabases(new FeedOptions()); + + // Print + String msg="Listing databases in account:\n"; + for(CosmosDatabaseProperties dbProps : databases) { + msg += String.format("-Database ID: %s\n",dbProps.getId()); + } + logger.info(msg + "\n"); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: {}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +} From de4aa7cd223a9920be17f9cdc578a8f86917eba9 Mon Sep 17 00:00:00 2001 From: Andrew Feldman Date: Fri, 15 May 2020 05:17:04 -0700 Subject: [PATCH 110/110] Analytical store sample --- .../AnalyticalContainerCRUDQuickstart.java | 184 ++++++++++++++++++ 1 file changed, 184 insertions(+) create mode 100644 src/main/java/com/azure/cosmos/examples/analyticalcontainercrud/sync/AnalyticalContainerCRUDQuickstart.java diff --git a/src/main/java/com/azure/cosmos/examples/analyticalcontainercrud/sync/AnalyticalContainerCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/analyticalcontainercrud/sync/AnalyticalContainerCRUDQuickstart.java new file mode 100644 index 0000000..fe73be1 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/analyticalcontainercrud/sync/AnalyticalContainerCRUDQuickstart.java @@ -0,0 +1,184 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.examples.analyticalcontainercrud.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; +import com.azure.cosmos.models.CosmosContainerResponse; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import com.azure.cosmos.models.FeedOptions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class AnalyticalContainerCRUDQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + + private CosmosDatabase database; + private CosmosContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to demonstrate the following ANALYTICAL STORE container CRUD operations: + * -Create + * -Update throughput + * -Read by ID + * -Read all + * -Delete + */ + public static void main(String[] args) { + AnalyticalContainerCRUDQuickstart p = new AnalyticalContainerCRUDQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.containerCRUDDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void containerCRUDDemo() throws Exception { + + 
logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + createDatabaseIfNotExists(); + createContainerIfNotExists(); + + readContainerById(); + readAllContainers(); + // deleteAContainer() is called at shutdown() + + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Create database if not exists + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + logger.info("Done."); + } + + // Container create + private void createContainerIfNotExists() throws Exception { + logger.info("Create container " + containerName + " if not exists."); + + // Create container if not exists + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/lastName"); + + // Set analytical store properties + containerProperties.setAnalyticalStoreTimeToLiveInSeconds(-1); + + // Create container with 200 RU/s + container = database.createContainerIfNotExists(containerProperties, 200).getContainer(); + + logger.info("Done."); + } + + // Update container throughput + private void updateContainerThroughput() throws Exception { + logger.info("Update throughput for container " + containerName + "."); + + // Specify new throughput value + container.replaceProvisionedThroughput(400); + + logger.info("Done."); + } + + // Container read + private void readContainerById() throws Exception { + logger.info("Read container " + containerName + " by ID."); + + // Read container by ID + container = database.getContainer(containerName); + + logger.info("Done."); + } + + // Container read all + private void readAllContainers() throws Exception { + logger.info("Read all containers in database " + databaseName + 
"."); + + // Read all containers in the account + CosmosPagedIterable containers = database.readAllContainers(new FeedOptions()); + + // Print + String msg="Listing containers in database:\n"; + for(CosmosContainerProperties containerProps : containers) { + msg += String.format("-Container ID: %s\n",containerProps.getId()); + } + logger.info(msg + "\n"); + + logger.info("Done."); + } + + // Container delete + private void deleteAContainer() throws Exception { + logger.info("Delete container " + containerName + " by ID."); + + // Delete container + CosmosContainerResponse containerResp = database.getContainer(containerName).delete(new CosmosContainerRequestOptions()); + logger.info("Status code for container delete: {}",containerResp.getStatusCode()); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: {}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteAContainer(); + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +}