diff --git a/.gitignore b/.gitignore index 0e13eeb..00a9c64 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,43 @@ buildNumber.properties .mvn/timing.properties # https://github.com/takari/maven-wrapper#usage-without-binary-jar .mvn/wrapper/maven-wrapper.jar + +# Compiled class file +*.class + +# Log file +*.log + +# BlueJ files +*.ctxt + +# idea files +/.idea/ + +# Mobile Tools for Java (J2ME) +.mtj.tmp/ + +# Package Files # +*.jar +*.war +*.nar +*.ear +*.zip +*.tar.gz +*.rar + +# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml +hs_err_pid* + +# Eclipse project file +*.project + +# Maven autogenerated classpath +*.classpath + +#VSCode Directories +/*.settings/ +/*.vscode/ +/target/ + +*.iml diff --git a/README.md b/README.md index 4614906..147cb7a 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,11 @@ --- page_type: sample languages: -- csharp +- java products: -- dotnet -description: "Add 150 character max description" -urlFragment: "update-this-to-unique-url-stub" +- java sdk +description: "Sample code repo for Azure Cosmos DB Java SDK for SQL API" +urlFragment: "" --- # Azure Cosmos DB Java SQL API Samples @@ -18,36 +18,70 @@ Guidance on onboarding samples to docs.microsoft.com/samples: https://review.doc Taxonomies for products and languages: https://review.docs.microsoft.com/new-hope/information-architecture/metadata/taxonomies?branch=master --> -Give a short description for your sample here. What does it do and why is it important? +Sample code repo for Azure Cosmos DB Java SDK for SQL API. By cloning and running these samples, and then studying their implementations, you will have an example for sending various requests to Azure Cosmos DB from Java SDK via the SQL API. ## Contents -Outline the file contents of the repository. It helps users navigate the codebase, build configuration and any related assets. - | File/folder | Description | |-------------------|--------------------------------------------| -| `src` | Sample source code. | +| `src` | Java sample source code. Many samples have 'sync' and 'async' variants | | `.gitignore` | Define what to ignore at commit time. | | `CHANGELOG.md` | List of changes to the sample. | | `CONTRIBUTING.md` | Guidelines for contributing to the sample. | | `README.md` | This README file. | | `LICENSE` | The license for the sample. | +| `pom.xml` | Maven Project Object Model File ## Prerequisites -Outline the required components and tools that a user might need to have on their machine in order to run the sample. This can be anything from frameworks, SDKs, OS versions or IDE releases. +* Maven +* Java SE JRE 8 +* Setting up an Azure Cosmos DB account through the Azure Portal. The **Create a database account** section of [this guide](https://docs.microsoft.com/en-us/azure/cosmos-db/create-sql-api-java) walks you through account creation. +* The hostname and master key for your Azure Cosmos DB account ## Setup -Explain how to prepare the sample once the user clones or downloads the repository. The section should outline every step necessary to install dependencies and set up any settings (for example, API keys and output folders). +Clone the sample to your PC. Using your Java IDE, open pom.xml as a Maven project. ## Running the sample -Outline step-by-step instructions to execute the sample and see its output. Include steps for executing the sample from the IDE, starting specific services in the Azure portal or anything related to the overall launch of the code. 
+These environment variables must be set
+
+```
+ACCOUNT_HOST=your account hostname;ACCOUNT_KEY=your account master key
+```
+
+in order to give the samples read/write access to your account.
+
+To run a sample, specify its main class:
+
+```
+com.azure.cosmos.examples.sample.synchronicity.MainClass
+```
+
+where *sample.synchronicity.MainClass* can be one of:
+* crudquickstart.sync.SampleCRUDQuickstart
+* crudquickstart.async.SampleCRUDQuickstartAsync
+* indexmanagement.sync.SampleIndexManagement
+* indexmanagement.async.SampleIndexManagementAsync
+* storedprocedure.sync.SampleStoredProcedure
+* storedprocedure.async.SampleStoredProcedureAsync
+* changefeed.SampleChangeFeedProcessor *(Change Feed has only an async sample, no sync sample.)*
+
+*To build and execute from the command line without an IDE*, run the following from the top-level directory of the repo:
+```
+mvn clean package
+mvn exec:java -Dexec.mainClass="com.azure.cosmos.examples.sample.synchronicity.MainClass" -DACCOUNT_HOST="your account hostname" -DACCOUNT_KEY="your account master key"
+```
+
+where *sample.synchronicity.MainClass*, *your account hostname*, and *your account master key* are to be filled in as above. This will rebuild and run the selected sample.

## Key concepts

-Provide users with more context on the tools and services used in the sample. Explain some of the code that is being used and how services interact with each other.
+These samples cover a range of Azure Cosmos DB usage topics, from basic to more advanced:
+* Basic management of databases, containers, and items
+* Indexing and stored procedures
+* Change Feed

## Contributing
diff --git a/media/java_sdk_naming_conventions.JPG b/media/java_sdk_naming_conventions.JPG
new file mode 100644
index 0000000..62618cf
Binary files /dev/null and b/media/java_sdk_naming_conventions.JPG differ
diff --git a/migration-guide.md b/migration-guide.md
new file mode 100644
index 0000000..8edc5ed
--- /dev/null
+++ b/migration-guide.md
@@ -0,0 +1,500 @@
+# Migration guide
+
+The purpose of this guide is to help you easily upgrade to Azure Cosmos DB Java SDK 4.0 for Core (SQL) API ("Java SDK 4.0" from here on out).
The audience for this guide is current users of
+
+* "Legacy" Sync Java SDK 2.x.x
+* Async Java SDK 2.x.x
+* Java SDK 3.x.x
+
+## Background
+
+| Java SDK | Release Date | Bundled APIs | Maven Jar | Java package name | API Reference | Release Notes |
+|-------------------------|--------------|----------------------|-----------------------------------------|----------------------------------|-----------------------------------------------------------|------------------------------------------------------------------------------------------|
+| Async 2.x.x | June 2018 | Async(RxJava) | com.microsoft.azure::azure-cosmosdb | com.microsoft.azure.cosmosdb.rx | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-async-java) |
+| "Legacy" Sync 2.x.x | Sept 2018 | Sync | com.microsoft.azure::azure-documentdb | com.microsoft.azure.cosmosdb | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-java) |
+| 3.x.x | July 2019 | Async(Reactor)/Sync | com.microsoft.azure::azure-cosmos | com.azure.data.cosmos | [API](https://azure.github.io/azure-cosmosdb-java/3.0.0/) | - |
+| 4.0 | April 2020 | Async(Reactor)/Sync | com.azure::azure-cosmos | com.azure.cosmos | - | - |
+
+## Important implementation changes
+
+### RxJava replaced with Reactor in Java SDK 3.x.x and 4.0
+
+If you have been using a pre-3.x.x Java SDK, it is recommended to review our [Reactor pattern guide](reactor-pattern-guide.md) for an introduction to async programming and Reactor.
+
+Users of the Async Java SDK 2.x.x will want to review our [Reactor vs RxJava Guide](reactor-rxjava-guide.md) for additional guidance on converting RxJava code to use Reactor.
+
+### Java SDK 4.0 implements **Direct Mode** in Async and Sync APIs
+
+If you are a user of the "Legacy" Sync Java SDK 2.x.x, note that a **Direct** **ConnectionMode** based on TCP (as opposed to HTTP) is implemented in Java SDK 4.0 for both the Async and Sync APIs.
+
+## Important API changes
+
+### Naming conventions
+
+![Java SDK naming conventions](media/java_sdk_naming_conventions.JPG)
+
+* Java SDK 3.x.x and 4.0 refer to clients, resources, etc. as ```Cosmos```*X*; for example ```CosmosClient```, ```CosmosDatabase```, ```CosmosContainer```..., whereas version 2.x.x Java SDKs did not have a uniform naming scheme.
+
+* Java SDK 3.x.x and 4.0 offer Sync and Async APIs.
+  * **Java SDK 4.0**: classes belong to the Sync API unless the name has ```Async``` after ```Cosmos```.
+  * **Java SDK 3.x.x**: classes belong to the Async API unless the name has ```Sync``` after ```Cosmos```.
+  * **Async Java SDK 2.x.x**: similar class names to **Sync Java SDK 2.x.x**, but the class name starts with ```Async```.
+
+### Hierarchical API
+
+Java SDK 4.0 and Java SDK 3.x.x introduce a hierarchical API which organizes clients, databases and containers in a nested fashion, as shown in this Java SDK 4.0 code snippet:
+
+```java
+CosmosContainer container = client.getDatabase("MyDatabaseName").getContainer("MyContainerName");
+```
+
+In version 2.x.x Java SDKs, all operations on resources and documents are performed through the client instance.
+
+### Representing documents
+
+In Java SDK 4.0, custom POJOs and ```JsonNode```s are the two options for writing and reading documents from Azure Cosmos DB.
+
+In Java SDK 3.x.x ```CosmosItemProperties``` was exposed by the public API and served as a document representation.
This class is no longer exposed in Java SDK 4.0.
+
+### Imports
+
+* Java SDK 4.0 packages begin with ```com.azure.cosmos```
+  * Java SDK 3.x.x packages begin with ```com.azure.data.cosmos```
+
+* Java SDK 4.0 places a number of classes in a nested package, ```com.azure.cosmos.models```. This includes
+  * ```CosmosContainerResponse```
+  * ```CosmosDatabaseResponse```
+  * ```CosmosItemResponse```
+  * And Async API analogs of all of the above...
+  * ```CosmosContainerProperties```
+  * ```FeedOptions```
+  * ```PartitionKey```
+  * ```IndexingPolicy```
+  * ```IndexingMode```
+  * ...etc.
+
+### Accessors
+
+Java SDK 4.0 exposes ```get``` and ```set``` methods for accessing instance members.
+* Example: a ```CosmosContainer``` instance has ```container.getId()``` and ```container.setId()``` methods.
+
+This is different from Java SDK 3.x.x, which exposes a fluent interface.
+* Example: a ```CosmosSyncContainer``` instance has ```container.id()``` which is overloaded to get or set ```id```.
+
+## Code snippet comparisons
+
+### Create resources
+
+**Java SDK 4.0 Async API:**
+
+```java
+ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy();
+// Setting the preferred location to Cosmos DB Account region
+defaultPolicy.setPreferredLocations(Lists.newArrayList("Your Account Location"));
+// Use Direct Mode for best performance
+defaultPolicy.setConnectionMode(ConnectionMode.DIRECT);
+
+// Create Async client.
+// Building an async client is still a sync operation.
+client = new CosmosClientBuilder()
+        .setEndpoint("your.hostname")
+        .setKey("yourmasterkey")
+        .setConnectionPolicy(defaultPolicy)
+        .setConsistencyLevel(ConsistencyLevel.EVENTUAL)
+        .buildAsyncClient();
+
+// Create database with specified name
+client.createDatabaseIfNotExists("YourDatabaseName")
+      .flatMap(databaseResponse -> {
+          database = databaseResponse.getDatabase();
+          // Container properties - name and partition key
+          CosmosContainerProperties containerProperties =
+              new CosmosContainerProperties("YourContainerName", "/id");
+          // Create container with specified properties & provisioned throughput
+          return database.createContainerIfNotExists(containerProperties, 400);
+      }).flatMap(containerResponse -> {
+          container = containerResponse.getContainer();
+          return Mono.empty();
+}).subscribe();
+```
+
+**Java SDK 3.x.x Async API:**
+
+```java
+ConnectionPolicy defaultPolicy = ConnectionPolicy.defaultPolicy();
+// Setting the preferred location to Cosmos DB Account region
+defaultPolicy.preferredLocations(Lists.newArrayList("Your Account Location"));
+
+// Create async client
+//
+client = new CosmosClientBuilder()
+        .endpoint("your.hostname")
+        .key("yourmasterkey")
+        .connectionPolicy(defaultPolicy)
+        .consistencyLevel(ConsistencyLevel.EVENTUAL)
+        .build();
+
+// Create database with specified name
+client.createDatabaseIfNotExists("YourDatabaseName")
+      .flatMap(databaseResponse -> {
+          database = databaseResponse.database();
+          // Container properties - name and partition key
+          CosmosContainerProperties containerProperties =
+              new CosmosContainerProperties("YourContainerName", "/id");
+          // Create container with specified properties & provisioned throughput
+          return database.createContainerIfNotExists(containerProperties, 400);
+      }).flatMap(containerResponse -> {
+          container = containerResponse.container();
+          return Mono.empty();
+}).subscribe();
+```
+
+### Item operations
+
+**Java SDK 4.0 Async API:**
+
+```java
+// Container is created. Generate many docs to insert.
+int number_of_docs = 50000; +ArrayList docs = generateManyDocs(number_of_docs); + +// Insert many docs into container... +Flux.fromIterable(docs) + .flatMap(doc -> container.createItem(doc)) + .subscribe(); // ...Subscribing triggers stream execution. +``` + +**Java SDK 3.x.x Async API:** + +```java +// Container is created. Generate many docs to insert. +int number_of_docs = 50000; +ArrayList docs = generateManyDocs(number_of_docs); + +// Insert many docs into container... +Flux.fromIterable(docs) + .flatMap(doc -> container.createItem(doc)) + .subscribe(); // ...Subscribing triggers stream execution. +``` + + +### Indexing + +**Java SDK 4.0 Async API:** + +```java +CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerName, "/lastName"); + +// Custom indexing policy +IndexingPolicy indexingPolicy = new IndexingPolicy(); +indexingPolicy.setIndexingMode(IndexingMode.CONSISTENT); + +// Included paths +List includedPaths = new ArrayList<>(); +IncludedPath includedPath = new IncludedPath(); +includedPath.setPath("/*"); +includedPaths.add(includedPath); +indexingPolicy.setIncludedPaths(includedPaths); + +// Excluded paths +List excludedPaths = new ArrayList<>(); +ExcludedPath excludedPath = new ExcludedPath(); +excludedPath.setPath("/name/*"); +excludedPaths.add(excludedPath); +indexingPolicy.setExcludedPaths(excludedPaths); + +containerProperties.setIndexingPolicy(indexingPolicy); + +CosmosAsyncContainer containerIfNotExists = database.createContainerIfNotExists(containerProperties, 400) + .block() + .getContainer(); +``` + +**Java SDK 3.x.x Async API:** + +```java +CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerName, "/lastName"); + +// Custom indexing policy +IndexingPolicy indexingPolicy = new IndexingPolicy(); +indexingPolicy.setIndexingMode(IndexingMode.CONSISTENT); //To turn indexing off set IndexingMode.NONE + +// Included paths +List includedPaths = new ArrayList<>(); +IncludedPath includedPath = new IncludedPath(); +includedPath.path("/*"); +includedPaths.add(includedPath); +indexingPolicy.setIncludedPaths(includedPaths); + +// Excluded paths +List excludedPaths = new ArrayList<>(); +ExcludedPath excludedPath = new ExcludedPath(); +excludedPath.path("/name/*"); +excludedPaths.add(excludedPath); +indexingPolicy.excludedPaths(excludedPaths); + +containerProperties.indexingPolicy(indexingPolicy); + +CosmosContainer containerIfNotExists = database.createContainerIfNotExists(containerProperties, 400) + .block() + .container(); +``` + +### Stored procedures + +**Java SDK 4.0 Async API:** + +```java +logger.info("Creating stored procedure...\n"); + +sprocId = "createMyDocument"; +String sprocBody = "function createMyDocument() {\n" + + "var documentToCreate = {\"id\":\"test_doc\"}\n" + + "var context = getContext();\n" + + "var collection = context.getCollection();\n" + + "var accepted = collection.createDocument(collection.getSelfLink(), documentToCreate,\n" + + " function (err, documentCreated) {\n" + + "if (err) throw new Error('Error' + err.message);\n" + + "context.getResponse().setBody(documentCreated.id)\n" + + "});\n" + + "if (!accepted) return;\n" + + "}"; +CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId, sprocBody); +container.getScripts() + .createStoredProcedure(storedProcedureDef, + new CosmosStoredProcedureRequestOptions()).block(); + +// ... 
+ +logger.info(String.format("Executing stored procedure %s...\n\n", sprocId)); + +CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions(); +options.setPartitionKey(new PartitionKey("test_doc")); + +container.getScripts() + .getStoredProcedure(sprocId) + .execute(null, options) + .flatMap(executeResponse -> { + logger.info(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", + sprocId, + executeResponse.getResponseAsString(), + executeResponse.getStatusCode(), + executeResponse.getRequestCharge())); + return Mono.empty(); + }).block(); +``` + +**Java SDK 3.x.x Async API:** + +```java +logger.info("Creating stored procedure...\n"); + +sprocId = "createMyDocument"; +String sprocBody = "function createMyDocument() {\n" + + "var documentToCreate = {\"id\":\"test_doc\"}\n" + + "var context = getContext();\n" + + "var collection = context.getCollection();\n" + + "var accepted = collection.createDocument(collection.getSelfLink(), documentToCreate,\n" + + " function (err, documentCreated) {\n" + + "if (err) throw new Error('Error' + err.message);\n" + + "context.getResponse().setBody(documentCreated.id)\n" + + "});\n" + + "if (!accepted) return;\n" + + "}"; +CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId, sprocBody); +container.getScripts() + .createStoredProcedure(storedProcedureDef, + new CosmosStoredProcedureRequestOptions()).block(); + +// ... + +logger.info(String.format("Executing stored procedure %s...\n\n", sprocId)); + +CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions(); +options.partitionKey(new PartitionKey("test_doc")); + +container.getScripts() + .getStoredProcedure(sprocId) + .execute(null, options) + .flatMap(executeResponse -> { + logger.info(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", + sprocId, + executeResponse.responseAsString(), + executeResponse.statusCode(), + executeResponse.requestCharge())); + return Mono.empty(); + }).block(); +``` + +### Change Feed + +**Java SDK 4.0 Async API:** + +```java +ChangeFeedProcessor changeFeedProcessorInstance = + ChangeFeedProcessor.changeFeedProcessorBuilder() + .setHostName(hostName) + .setFeedContainer(feedContainer) + .setLeaseContainer(leaseContainer) + .setHandleChanges((List docs) -> { + logger.info("--->setHandleChanges() START"); + + for (JsonNode document : docs) { + try { + //Change Feed hands the document to you in the form of a JsonNode + //As a developer you have two options for handling the JsonNode document provided to you by Change Feed + //One option is to operate on the document in the form of a JsonNode, as shown below. This is great + //especially if you do not have a single uniform data model for all documents. + logger.info("---->DOCUMENT RECEIVED: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() + .writeValueAsString(document)); + + //You can also transform the JsonNode to a POJO having the same structure as the JsonNode, + //as shown below. Then you can operate on the POJO. + CustomPOJO pojo_doc = OBJECT_MAPPER.treeToValue(document, CustomPOJO.class); + logger.info("----=>id: " + pojo_doc.getId()); + + } catch (JsonProcessingException e) { + e.printStackTrace(); + } + } + logger.info("--->handleChanges() END"); + + }) + .build(); + +// ... 
+ + changeFeedProcessorInstance.start() + .subscribeOn(Schedulers.elastic()) + .subscribe(); +``` + +**Java SDK 3.x.x Async API:** + +```java +ChangeFeedProcessor changeFeedProcessorInstance = + ChangeFeedProcessor.Builder() + .hostName(hostName) + .feedContainer(feedContainer) + .leaseContainer(leaseContainer) + .handleChanges((List docs) -> { + logger.info("--->setHandleChanges() START"); + + for (CosmosItemProperties document : docs) { + try { + + // You are given the document as a CosmosItemProperties instance which you may + // cast to the desired type. + CustomPOJO pojo_doc = document.getObject(CustomPOJO.class); + logger.info("----=>id: " + pojo_doc.id()); + + } catch (Exception e) { + e.printStackTrace(); + } + } + logger.info("--->handleChanges() END"); + + }) + .build(); + +// ... + + changeFeedProcessorInstance.start() + .subscribeOn(Schedulers.elastic()) + .subscribe(); +``` + +### Container TTL + +**Java SDK 4.0 Async API:** + +```java +CosmosAsyncContainer container; + +// Create a new container with TTL enabled with default expiration value +CosmosContainerProperties containerProperties = new CosmosContainerProperties("myContainer", "/myPartitionKey"); +containerProperties.setDefaultTimeToLiveInSeconds(90 * 60 * 60 * 24); +container = database.createContainerIfNotExists(containerProperties, 400).block().getContainer(); +``` + +**Java SDK 3.x.x Async API:** + +```java +CosmosContainer container; + +// Create a new container with TTL enabled with default expiration value +CosmosContainerProperties containerProperties = new CosmosContainerProperties("myContainer", "/myPartitionKey"); +containerProperties.defaultTimeToLive(90 * 60 * 60 * 24); +container = database.createContainerIfNotExists(containerProperties, 400).block().container(); +``` + +### Document TTL + +**Java SDK 4.0 Async API:** + +```java +// Include a property that serializes to "ttl" in JSON +public class SalesOrder +{ + private String id; + private String customerId; + private Integer ttl; + + public SalesOrder(String id, String customerId, Integer ttl) { + this.id = id; + this.customerId = customerId; + this.ttl = ttl; + } + + public String getId() {return this.id;} + public void setId(String new_id) {this.id = new_id;} + public String getCustomerId() {return this.customerId;} + public void setCustomerId(String new_cid) {this.customerId = new_cid;} + public Integer getTtl() {return this.ttl;} + public void setTtl(Integer new_ttl) {this.ttl = new_ttl;} + + //... +} + +// Set the value to the expiration in seconds +SalesOrder salesOrder = new SalesOrder( + "SO05", + "CO18009186470", + 60 * 60 * 24 * 30 // Expire sales orders in 30 days +); +``` + +**Java SDK 3.x.x Async API:** + +```java +// Include a property that serializes to "ttl" in JSON +public class SalesOrder +{ + private String id; + private String customerId; + private Integer ttl; + + public SalesOrder(String id, String customerId, Integer ttl) { + this.id = id; + this.customerId = customerId; + this.ttl = ttl; + } + + public String id() {return this.id;} + public SalesOrder id(String new_id) {this.id = new_id; return this;} + public String customerId() {return this.customerId; return this;} + public SalesOrder customerId(String new_cid) {this.customerId = new_cid;} + public Integer ttl() {return this.ttl;} + public SalesOrder ttl(Integer new_ttl) {this.ttl = new_ttl; return this;} + + //... 
+} + +// Set the value to the expiration in seconds +SalesOrder salesOrder = new SalesOrder( + "SO05", + "CO18009186470", + 60 * 60 * 24 * 30 // Expire sales orders in 30 days +); +``` diff --git a/pom.xml b/pom.xml new file mode 100644 index 0000000..69ece04 --- /dev/null +++ b/pom.xml @@ -0,0 +1,72 @@ + + + 4.0.0 + + com.azure + azure-cosmos-java-sql-api-samples + 1.0-SNAPSHOT + Get Started With Sync / Async Java SDK for SQL API of Azure Cosmos DB Database Service + + + UTF-8 + + + + + + + maven-compiler-plugin + 3.1 + + 1.8 + 1.8 + + + + org.codehaus.mojo + exec-maven-plugin + 1.6.0 + + + org.apache.maven.plugins + maven-eclipse-plugin + 2.8 + + + + org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8 + + + + + + + + + com.azure + azure-cosmos + 4.0.1-beta.1 + + + org.apache.logging.log4j + log4j-slf4j-impl + 2.13.0 + test + + + + org.apache.logging.log4j + log4j-api + 2.11.1 + test + + + + org.slf4j + slf4j-jdk14 + 1.7.28 + + + \ No newline at end of file diff --git a/reactor-pattern-guide.md b/reactor-pattern-guide.md new file mode 100644 index 0000000..356c1fd --- /dev/null +++ b/reactor-pattern-guide.md @@ -0,0 +1,175 @@ +# Reactor pattern guide + +The purpose of this guide is to help you get started using Reactor-based Java SDKs by understanding basic design patterns for the Reactor framework.The [Project Reactor](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) website has further documentation if you want to learn more. + +## Background + +### 1. Reactive Programming and the Reactive Streams Standard + +Reactive Programming is a declarative programming paradigm in which program operation and control flow are described as a stream of events and data passing through a pipeline of operations. Each operation affects the data which flows downstream from it. Reactive Programming is a useful technique (through not the only technique) for event-driven asynchronous programming; for example it is an alternative to explicitly callback-based programming. + +**Imperative programming** is the more common or "familiar" programming paradigm in which program operation and control flow are expressed by sequential commands which manipulate program state (variables). A simple imperative program in pseudocode is + + If input data available, read into variable x + Do operation1 on variable x + Then do operation2 on variable y + Then do operation3 on variable z + And then print the result + +Specifically, Reactive Programming is a **declarative dataflow** paradigm - the programmer must describe a directed acyclic graph (DAG) of operations which represents the logic of the program and the flow of data. A simple declarative dataflow representation of the above program in pseudocode is: + + asynchronous data source => operation1 => operation2 => operation3 => print + +How this differs from imperative programming, is that the coder is describing the high-level process of execution but letting the language implementation decide when and how to implement these operations. This is exemplified by the concept of *back-pressure* which is baked into some implementations of Reactive Programming. Back-pressure essentially rate-limits dataflow in a Reactive Stream based on what the recipient of the data can handle. An imperative implementation of back-pressure would require the programmer to describe a complicated flow-control process for each async operation to respond to events. 
In a declarative dataflow language with back-pressure, the programmer specifies the directed graph of pipelined operations while the language handles scheduling of operations at the implementation level.
+
+[Reactive Streams](http://www.reactive-streams.org/) is an industry standard for declarative dataflow programming in an asynchronous environment. More detail on design principles can be found in the [Reactive Manifesto](https://www.reactivemanifesto.org/). It is the basis for Azure's async Java SDKs going forward.
+
+### 2. Reactive Streams Frameworks for Java/JVM
+
+A Reactive Streams framework implements the Reactive Streams Standard for specific programming languages. The [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](http://reactivex.io/) for JVM) framework was the basis of past Azure Java SDKs, but will not be going forward.
+
+[Project Reactor](https://projectreactor.io/) or just *Reactor* is the Reactive Programming framework being used for new Azure Java SDKs. The purpose of the rest of this document is to help you get started with Reactor.
+
+## Reactor design patterns
+
+### 1. Assemble and Subscribe phases
+
+To write a program using Reactor, you will need to describe one or more async operation pipelines for processing Reactive Streams. In typical uses of Reactor, you describe a pipeline by
+
+1. creating a ```Publisher``` (which pushes events and data into the pipeline asynchronously) and a ```Subscriber``` (which consumes events and data from the pipeline and operates on them asynchronously), and
+
+2. describing each stage in the pipeline programmatically, in terms of how it processes data from the previous stage.
+
+```Publisher``` and ```Subscriber``` are both interfaces defined by the Reactive Streams standard and used by Reactor.
+
+Reactor follows a "hybrid push-pull model": the ```Publisher``` pushes events and data into the pipeline as they are available, but ***only*** once you request events and data from the ```Publisher``` by **subscribing**.
+
+To put this in context, consider a "normal" non-Reactor program you might write that takes a dependency on some other code with unpredictable response time. For example, maybe you write a function to perform a calculation, and one input comes from calling a function that requests data over HTTP. You might deal with this by implementing a control flow which first calls the dependency code, waits for it to return output, and then provides that output to your code as input. So your code is "pulling" output from its dependency on an on-demand basis. This can be inefficient if there is latency in the dependency (as is the case for the aforementioned HTTP request example); your code has to loop waiting for the dependency.
+
+In a "push" model the dependency signals your code to consume the HTTP request response on an "on-availability" basis; the rest of the time, your code lies dormant, freeing up CPU cycles. This is an event-driven and async approach. But in order for the dependency to signal your code, ***the dependency has to know that your code depends on it*** – and that is the purpose of defining async operation pipelines in Reactor; each pipeline stage is really a piece of async code servicing events and data from the previous stage on an on-availability basis. By defining the pipeline, you tell each stage where to forward events and data to.
+
+Now I will illustrate this with Reactor code examples. Consider a Reminders app. The app's job is to create a message for the user every time there is a new reminder.
To find out if there are new reminders for the user, the ```ReminderAsyncService``` running on the user's smartphone periodically sends HTTP requests to the Reminders server. ```ReminderAsyncService``` has a Reactive implementation in which ```ReminderAsyncService.getRemindersPublisher()``` returns a ```RemindersPublisher``` instance which listens for HTTP responses from the server. When a response arrives, the ```ReminderPublisher``` pushes the resulting reminders to a Reactive Stream within the smartphone app. ```RemindersPublisher``` extends the ```Publisher``` interface. + +**Assembly phase (define dependency relations as a pipeline)** +```java +Flux reminderPipeline = +ReminderAsyncService.getRemindersPublisher() // Pipeline Stage 1 + .flatMap(reminder -> "Don't forget: " + reminder) // Stage 2 + .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); // Stage 3 +``` + +**Subscribe phase (execute pipeline on incoming events)** +```java +reminderPipeline.subscribe(System.out::println); // Async – returns immediately, pipeline executes in the background + +while (true) doOtherThings(); // We're freed up to do other tasks 😊 +``` + +The ```Flux``` class internally represents an async operation pipeline as a DAG and provides instance methods for operating on the pipeline. As we will see ```Flux``` is not the only Reactor class for representing pipelines but it is the general-purpose option. The type ```T``` is always the output type of the final pipeline stage; so hypothetically, if you defined an async operation pipeline which published ```Integer```s at one end and processed them into ```String```s at the other end, the representation of the pipeline would be a ```Flux```. + +In the **Assembly phase** shown above, you describe program logic as an async operation pipeline (a ```Flux```), but don't actually execute it just yet. Let's break down how the async operation pipeline is built in the **Assembly phase** snippet above: + +* **Stage 1**: ```ReminderAsyncService.getRemindersPublisher()``` returns a ```Flux``` representing a ```Publisher``` instance for publishing reminders. + +* **Stage 2**: ```.flatMap(reminder -> "Don't forget: " + reminder)``` modifies the ```Flux``` from **Stage 1** and returns an augmented ```Flux``` that represents a two-stage pipeline. The pipeline consists of + * the ```RemindersPublisher```, followed by + * the ```reminder -> "Don't forget: " + reminder``` operation which prepends "Don't forget: " to the ```reminder``` string (```reminder``` is a variable that can have any name and represents the previous stage output.) + +* **Stage 3**: ```.flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn)``` modifies the ```Flux``` from **Stage 2** and returns a further-augmented ```Flux``` that represents a three-stage pipeline. The pipeline consists of + * the ```RemindersPublisher```, + * the **Stage 2** operation, and finally + * the ```strIn -> LocalDateTime.now().toString() + ": "+ strIn``` operation, which timestamps the **Stage 2** output string. + +Although we "ran" the Assembly phase code, all it did was build up the structure of your program, not run it. In the **Subscribe phase** you execute the pipeline that you defined in the Assembly phase. Here is how that works. 
You call + +```java +reminderPipeline.subscribe(System.out::println); //Async – returns immediately +``` + +and + +* ```subscribe()``` will generate a ```Subscription``` instance containing an unbounded request for ***all*** events that ```RemindersPublisher``` will ever produce. + +* Reactor framework propagates the ```Subscription``` info up the pipeline to the ```RemindersPublisher``` instance. + +* The ```RemindersPublisher``` instance reads the ```Subscription``` details and responds by pushing an event into the pipeline every time there is a new reminder. The ```RemindersPublisher``` will continue to push an event every time a reminder becomes available, until it has pushed as many events as were requested in the ```Subscription``` (which is infinity in this case, so the ```Publisher``` will just keep going.) + +When I say that the ```RemindersPublisher``` "pushes events into the pipeline", I mean that the ```RemindersPublisher``` issues an ```onNext``` signal to the second pipeline stage (```.flatMap(reminder -> "Don't forget: " + reminder)```) paired with a ```String``` argument containing the reminder. ```flatMap()``` responds to an ```onNext``` signal by taking the ```String``` data passed in and applying the transformation that is in ```flatMap()```'s argument parentheses to the input data (in this case, by prepending the words "Don't forget: "). This signal propagates down the pipeline: pipeline Stage 2 issues an ```onNext``` signal to pipeline Stage 3 (```.flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn)```) with its output as the argument; and then pipeline Stage 3 issues its own output along with an ```onNext``` signal. + +Now what happens after pipeline Stage 3 is different – the ```onNext``` signal reached the last pipeline stage, so what happens to the final-stage ```onNext``` signal and its associated ```String``` argument? The answer is that when you called ```subscribe()```, ```subscribe()``` also created a ```Subscriber``` instance which implements a method for handling ```onNext``` signals and serves as the last stage of the pipeline. The ```Subscriber```'s ```onNext``` handler will call whatever code you wrote in the argument parentheses of ```subscribe()```, allowing you to customize for your application. In the Subscribe phase snippet above, we called + +```java +reminderPipeline.subscribe(System.out::println); //Async – returns immediately +``` + +which means that every time an ```onNext``` signal reaches the end of the operation pipeline, the ```Subscriber``` will call ```System.out.println()``` on the reminder ```String``` associated with the event and print it to the terminal. + +In ```subscribe()``` you typically want to handle the pipeline output with some finality, i.e. by printing it to the terminal, displaying it in a GUI, running a calculation on it, etc. or doing something else before discarding the data entirely. That said, Reactor does allow you to call ```subscribe()``` with no arguments and just discard incoming events and data - in that case you would implement all of the logic of your program in the preceding pipeline stages, including saving the results to a global variable or printing them to the terminal. + +That was a lot. So let's step back for a moment and mention a few key points. +* Keep in mind that Reactor is following a hybrid push-pull model where async events are published at a rate requested by the ```Subscriber```. +* Observe that a ```Subscription``` for N events is a type of pull operation from the ```Subscriber```. 
The ```Publisher``` controls the rate and timing of pushing events, until it exhausts the N events requested by the ```Subscriber```, and then it stops. +* This approach enables the implementation of ***backpressure***, whereby the ```Subscriber``` can size ```Subscription``` counts to adjust the rate of ```Publisher``` events if they are coming too slow or too fast to process. +* ```subscribe()``` is Reactor's built-in ```Subscription``` generator, by default it requests all events from the ```Publisher``` ("unbounded request".) [See the Project Reactor documentation here](https://projectreactor.io/docs/core/3.1.2.RELEASE/reference/) for more guidance on customizing the subscription process. + +And the most important takeaway: **Nothing happens until you subscribe.** + +### 2. ```Flux```, ```Mono```, and ```subscribe()``` + +The ```Subscriber``` and ```Publisher``` are independent entities; just because the ```Subscriber``` subscribes to N events doesn't mean the ```Publisher``` has them available. ```Flux``` supports ```Publisher```s with 0, 1, or M events, where M can be finite or unbounded. The Assembly stage for a publisher with M=3 events is shown below + +```java +Flux reminderPipeline = + Flux.just("Wash the dishes","Mow the lawn","Sleep") // Publisher, 3 events + .flatMap(reminder -> "Don't forget: " + reminder) + .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); // Nothing executed yet +``` + +```Flux.just()``` is a [Reactor factory method](https://projectreactor.io/docs/core/release/reference/) which contrives to create a custom ```Publisher``` based on its input arguments. You could fully customize your ```Publisher``` implementation by writing a class that implements ```Publisher```; that is outside the scope of this discussion. The output of ```Flux.just()``` in the example above is a ```Publisher``` which will immediately and asynchronously push ```"Wash the dishes"```, ```"Mow the lawn"```, and ```"Sleep"``` into the pipeline as soon as it gets a ```Subscription```. Thus, upon subscription, + +```java +reminderPipeline.subscribe(System.out::println); +``` + +will output the three Strings shown and then end. + +Suppose now we want to add two special behaviors to our program: (1) After all M Strings have been printed, print "End of reminders." so the user knows we are finished. (2) Print the stack trace for any ```Exception```s which occur during execution. A modification to the ```subscribe()``` call handles all of this: + +```java +reminderPipeline.subscribe(strIn -> { + System.out.println(strIn); +}, +err -> { + err.printStackTrace(); +}, +() -> { + System.out.println("End of reminders."); +}); +``` + +Let's break this down. Remember we said that the argument to ```subscribe()``` determines how the ```Subscriber``` handles ```onNext```? I will mention two additional signals which Reactor uses to propagate status information along the pipeline: ```onComplete```, and ```onError```. Both signals denote completion of the Stream; only ```onComplete``` represents successful completion. The ```onError``` signal is associated with an ```Exception``` instance related to an error; the ```onComplete``` signal has no associated data. + +As it turns out, we can supply additional code to ```subscribe()``` in the form of Java 8 lambdas and handle ```onComplete``` and ```onError``` as well as ```onNext```! 
Picking apart the code snippet above, + +* ```strIn -> {...}``` defines a lambda for handling ```onNext```, where ```strIn``` represents the data item associated with each incoming ```onNext``` signal (the name ```strIn``` is my choice, any variable name will do). +* ```err -> {...}``` defines a lambda for handling ```onError```, where ```err``` is the ```Exception```. +* ```() -> {...}``` defines a lambda for handling ```onComplete```, and notice there is no data associated (empty parentheses). The ```Publisher``` will issue ```onComplete``` when it has exhausted all events that it was created to issue. + +For the special cases of M=0 and M=1 for the ```Publisher```, Reactor provides a special-purpose ```Mono``` class for representing the async operation pipeline. + +```java +Mono reminderPipeline = + Mono.just("Are you sure you want to cancel your Reminders service?") // Publisher, 1 event + .flatMap(reminder -> "Act now: " + reminder) + .flatMap(strIn -> LocalDateTime.now().toString() + ": "+ strIn); +``` + +Again, ```Mono.just()``` is a Reactor factory method which creates the single-event publisher. This ```Publisher``` will push its argument into the Reactive Stream pipeline with an ```onNext``` signal and then optionally issue an ```onComplete``` signal indicating completion. + +## For More Information + +* If you would like to learn more about Project Reactor and Reactive Streams, or get started writing code using Reactor, you can visit [the Project Reactor website.](https://projectreactor.io/) + +* [A gentle introduction to Reactor from tech.io](https://tech.io/playgrounds/929/reactive-programming-with-reactor-3/Intro) + +* [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](http://reactivex.io/) for JVM), a project of ReactiveX **which is no longer used in new Azure SDKs** diff --git a/reactor-rxjava-guide.md b/reactor-rxjava-guide.md new file mode 100644 index 0000000..01cdcc4 --- /dev/null +++ b/reactor-rxjava-guide.md @@ -0,0 +1,119 @@ +# Reactor vs RxJava guide + +The purpose of this guide is to help those who are more familiar with the RxJava framework to familiarize themselves with the Reactor framework and Azure Cosmos DB Java SDK 4.0 for Core (SQL) API ("Java SDK 4.0" from here on out.) + +Users of Async Java SDK 2.x.x should read this guide to understand how familiar async tasks can be performed in Reactor. We recommend first reading the [Reactor pattern guide](reactor-pattern-guide.md) for more general Reactor introduction. 
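+As a first taste of the kind of translation this guide covers, here is a minimal, self-contained sketch of one familiar task: waiting synchronously for a single async result. This is generic framework code (not Azure Cosmos DB SDK calls), and it assumes RxJava 1.x and reactor-core are on the classpath; the class name is only for illustration.
+
+```java
+import reactor.core.publisher.Mono;
+import rx.Observable;
+
+public class BlockingComparison {
+    public static void main(String[] args) {
+        // RxJava 1.x: block the calling thread until the single result arrives
+        String rxResult = Observable.just("result").toBlocking().single();
+
+        // Reactor: the rough equivalent is block() on a Mono
+        String reactorResult = Mono.just("result").block();
+
+        System.out.println(rxResult + " / " + reactorResult);
+    }
+}
+```
+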
+ +A quick refresher on Java SDK versions: + +| Java SDK | Release Date | Bundled APIs | Maven Jar | Java package name |API Reference | Release Notes | +|-------------------------|--------------|----------------------|-----------------------------------------|----------------------------------|-----------------------------------------------------------|------------------------------------------------------------------------------------------| +| Async 2.x.x | June 2018 | Async(RxJava) | com.microsoft.azure::azure-cosmosdb | com.microsoft.azure.cosmosdb.rx | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-async-java) | +| "Legacy" Sync 2.x.x | Sept 2018 | Sync | com.microsoft.azure::azure-documentdb | com.microsoft.azure.cosmosdb | [API](https://azure.github.io/azure-cosmosdb-java/2.0.0/) | [Release Notes](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-java) | +| 3.x.x | July 2019 | Async(Reactor)/Sync | com.microsoft.azure::azure-cosmos | com.azure.data.cosmos | [API](https://azure.github.io/azure-cosmosdb-java/3.0.0/) | - | +| 4.0 | April 2020 | Async(Reactor)/Sync | com.azure::azure-cosmos | com.azure.cosmos | - | - | + +## Background + +[Reactive Streams](http://www.reactive-streams.org/) is an industry standard for declarative dataflow programming in an asynchronous environment. More detail on design principles can be found in the [Reactive Manifesto](https://www.reactivemanifesto.org/). It is the basis for Azure's async Java SDKs going forward. + +A Reactive Streams framework implements the Reactive Streams Standard for specific programming languages. + +The [RxJava](https://github.com/ReactiveX/RxJava) ([ReactiveX](http://reactivex.io/) for JVM) framework was the basis of past Azure Java SDKs, but will not be going forward. Async Java SDK 2.x.x was implemented using RxJava 1; in this guide we will assume that RxJava 1 is the version you are already familiar with i.e. as a result of working with the Async Java SDK 2.x.x. + +[Project Reactor](https://projectreactor.io/) or just *Reactor* is the Reactive Programming framework being used for new Azure Java SDKs. The purpose of the rest of this document is to help you get started with Reactor. + +## Comparison between Reactor and RxJava + +RxJava 1 provides a framework for implementing the **Observer Pattern** in your application. In the Observer Pattern, +* ```Observable```s are entities that receive events and data (i.e. UI, keyboard, TCP, ...) from outside sources, and make those events and data available to your program. +* ```Observer```s are the entities which subscribe to the Observable events and data. + +The [Reactor pattern guide](reactor-pattern-guide.md) gives a brief conceptual overview of Reactor. In summary: +* ```Publisher```s are the entities which make events and data from outside sources available to the program +* ```Subscriber```s subscribe to the events and data from the ```Publisher``` + +Both frameworks facilitate asynchronous, event-driven programming. Both frameworks allow you to chain together a pipeline of operations between Observable/Observer or Publisher/Subscriber. + +Roughly, what you would use an ```Observable``` for in RxJava, you would use a ```Flux``` for in Reactor. And what you would use a ```Single``` for in RxJava, you would use a ```Mono``` for in Reactor. 
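+To make that rough correspondence concrete, here is a minimal sketch of the equivalent types side by side. This is plain framework code (not SDK calls), assuming reactor-core and RxJava 1.x are on the classpath; the class and variable names are only for illustration.
+
+```java
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import rx.Observable;
+import rx.Single;
+
+public class TypeCorrespondence {
+    public static void main(String[] args) {
+        // RxJava 1.x: 0..N items vs. exactly one item
+        Observable<String> manyRx = Observable.just("a", "b", "c");
+        Single<String> oneRx = Single.just("a");
+
+        // Reactor: the corresponding 0..N and 0..1 publishers
+        Flux<String> manyReactor = Flux.just("a", "b", "c");
+        Mono<String> oneReactor = Mono.just("a");
+
+        // In both frameworks, nothing happens until you subscribe
+        manyRx.subscribe(System.out::println);
+        oneRx.subscribe(System.out::println);
+        manyReactor.subscribe(System.out::println);
+        oneReactor.subscribe(System.out::println);
+    }
+}
+```
+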
The critical difference between the two frameworks is really in the core implementation:
+Reactor operates a service which receives event/data pairs serially from a ```Publisher```, demultiplexes them, and forwards them to registered ```Subscriber```s. This model was designed to help servers efficiently dispatch requests in a distributed system.
+The RxJava approach is more general-purpose. ```Observer```s subscribe directly to the ```Observable``` and the ```Observable``` sends events and data directly to ```Observer```s, with no central service handling dispatch.
+
+### Summary: rules of thumb to convert RxJava code into Reactor code
+
+* An RxJava ```Observable``` will become a Reactor ```Flux```
+
+* An RxJava ```Single``` will become a Reactor ```Mono```
+
+* An RxJava ```Subscriber``` is still a ```Subscriber``` in Reactor
+
+* Operators such as ```map()```, ```filter()```, and ```flatMap()``` are the same
+
+## Examples of tasks in Reactor and RxJava
+
+* Reminder app example from the [Reactor pattern guide](reactor-pattern-guide.md)
+
+**Reactor:**
+```java
+ReminderAsyncService.getRemindersPublisher()                           // Pipeline Stage 1
+    .flatMap(reminder -> "Don't forget: " + reminder)                  // Stage 2
+    .flatMap(strIn -> LocalDateTime.now().toString() + ": " + strIn)   // Stage 3
+    .subscribe(System.out::println);
+```
+
+**RxJava:**
+```java
+ReminderAsyncService.getRemindersObservable()                          // Pipeline Stage 1
+    .flatMap(reminder -> "Don't forget: " + reminder)                  // Stage 2
+    .flatMap(strIn -> LocalDateTime.now().toString() + ": " + strIn)   // Stage 3
+    .subscribe(item -> System.out.println(item));
+```
+
+* Three-event ```Publisher``` example from the [Reactor pattern guide](reactor-pattern-guide.md)
+
+**Reactor:**
+```java
+Flux.just("Wash the dishes","Mow the lawn","Sleep")                    // Publisher, 3 events
+    .flatMap(reminder -> "Don't forget: " + reminder)
+    .flatMap(strIn -> LocalDateTime.now().toString() + ": " + strIn)   // Nothing executed yet
+    .subscribe(strIn -> {
+        System.out.println(strIn);
+    },
+    err -> {
+        err.printStackTrace();
+    },
+    () -> {
+        System.out.println("End of reminders.");
+});
+```
+
+**RxJava:**
+```java
+Observable.just("Wash the dishes","Mow the lawn","Sleep")              // Observable, 3 events
+    .flatMap(reminder -> "Don't forget: " + reminder)
+    .flatMap(strIn -> LocalDateTime.now().toString() + ": " + strIn)   // Nothing executed yet
+    .subscribe(strIn -> System.out.println(strIn),
+        err -> err.printStackTrace(),
+        () -> System.out.println("End of reminders.")
+);
+```
+
+* Mono example from the [Reactor pattern guide](reactor-pattern-guide.md)
+
+**Reactor:**
+```java
+Mono.just("Are you sure you want to cancel your Reminders service?")   // Publisher, 1 event
+    .flatMap(reminder -> "Act now: " + reminder)
+    .flatMap(strIn -> LocalDateTime.now().toString() + ": " + strIn)
+    .subscribe(System.out::println);
+```
+
+**RxJava:**
+```java
+Single.just("Are you sure you want to cancel your Reminders service?") // Publisher, 1 event
+    .flatMap(reminder -> "Act now: " + reminder)
+    .flatMap(strIn -> LocalDateTime.now().toString() + ": " + strIn)
+    .subscribe(item -> System.out.println(item));
+```
diff --git a/src/main/java/com/azure/cosmos/examples/analyticalcontainercrud/sync/AnalyticalContainerCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/analyticalcontainercrud/sync/AnalyticalContainerCRUDQuickstart.java
new file mode 100644
index 0000000..fe73be1
--- /dev/null
+++ b/src/main/java/com/azure/cosmos/examples/analyticalcontainercrud/sync/AnalyticalContainerCRUDQuickstart.java
@@
-0,0 +1,184 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.analyticalcontainercrud.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; +import com.azure.cosmos.models.CosmosContainerResponse; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import com.azure.cosmos.models.FeedOptions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class AnalyticalContainerCRUDQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + + private CosmosDatabase database; + private CosmosContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to demonstrate the following ANALYTICAL STORE container CRUD operations: + * -Create + * -Update throughput + * -Read by ID + * -Read all + * -Delete + */ + public static void main(String[] args) { + AnalyticalContainerCRUDQuickstart p = new AnalyticalContainerCRUDQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.containerCRUDDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void containerCRUDDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + createDatabaseIfNotExists(); + createContainerIfNotExists(); + + readContainerById(); + readAllContainers(); + // deleteAContainer() is called at shutdown() + + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Create database if not exists + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + logger.info("Done."); + } + + // Container create + private void createContainerIfNotExists() throws Exception { + logger.info("Create container " + containerName + " if not exists."); + + // Create container if not exists + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/lastName"); + + // Set analytical store properties + containerProperties.setAnalyticalStoreTimeToLiveInSeconds(-1); + + // Create container with 200 RU/s + container = database.createContainerIfNotExists(containerProperties, 200).getContainer(); + + logger.info("Done."); + } + + // Update container throughput + private void updateContainerThroughput() throws Exception { + logger.info("Update throughput 
for container " + containerName + "."); + + // Specify new throughput value + container.replaceProvisionedThroughput(400); + + logger.info("Done."); + } + + // Container read + private void readContainerById() throws Exception { + logger.info("Read container " + containerName + " by ID."); + + // Read container by ID + container = database.getContainer(containerName); + + logger.info("Done."); + } + + // Container read all + private void readAllContainers() throws Exception { + logger.info("Read all containers in database " + databaseName + "."); + + // Read all containers in the account + CosmosPagedIterable containers = database.readAllContainers(new FeedOptions()); + + // Print + String msg="Listing containers in database:\n"; + for(CosmosContainerProperties containerProps : containers) { + msg += String.format("-Container ID: %s\n",containerProps.getId()); + } + logger.info(msg + "\n"); + + logger.info("Done."); + } + + // Container delete + private void deleteAContainer() throws Exception { + logger.info("Delete container " + containerName + " by ID."); + + // Delete container + CosmosContainerResponse containerResp = database.getContainer(containerName).delete(new CosmosContainerRequestOptions()); + logger.info("Status code for container delete: {}",containerResp.getStatusCode()); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: {}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteAContainer(); + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/autoscalecontainercrud/sync/AutoscaleContainerCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/autoscalecontainercrud/sync/AutoscaleContainerCRUDQuickstart.java new file mode 100644 index 0000000..8ffcb2d --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/autoscalecontainercrud/sync/AutoscaleContainerCRUDQuickstart.java @@ -0,0 +1,195 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.examples.autoscalecontainercrud.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; +import com.azure.cosmos.models.CosmosContainerResponse; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import com.azure.cosmos.models.FeedOptions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class AutoscaleContainerCRUDQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + + private CosmosDatabase database; + private CosmosContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to demonstrate the following AUTOSCALE container CRUD operations: + * -Create + * -Update throughput + * -Read by ID + * -Read all + * -Delete + */ + public static void main(String[] args) { + AutoscaleContainerCRUDQuickstart p = new AutoscaleContainerCRUDQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.autoscaleContainerCRUDDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void autoscaleContainerCRUDDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + createDatabaseIfNotExists(); + createContainerIfNotExists(); + + readContainerById(); + readAllContainers(); + // deleteAContainer() is called at shutdown() + + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Create database if not exists + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + logger.info("Done."); + } + + // Container create + private void createContainerIfNotExists() throws Exception { + logger.info("Create autoscale container " + containerName + " if not exists."); + + // Container and autoscale throughput settings + CosmosContainerProperties autoscaleContainerProperties = new CosmosContainerProperties(containerName, "/lastName"); + ThroughputProperties autoscaleThroughputProperties = ThroughputProperties.createAutoscaledThroughput(200); //Set autoscale max RU/s + + // Create the container with autoscale enabled + container = database.createContainer(autoscaleContainerProperties, autoscaleThroughputProperties, + new CosmosContainerRequestOptions()).getContainer(); + + logger.info("Done."); + } + + // Update container throughput + private void updateContainerThroughput() throws Exception { + 
logger.info("Update autoscale max throughput for container " + containerName + "."); + + // Change the autoscale max throughput (RU/s) + container.replaceThroughput(ThroughputProperties.createAutoscaledThroughput(400)); + + logger.info("Done."); + } + + private void readContainerThroughput() throws Exception { + // Read the throughput on a resource + ThroughputProperties autoscaleContainerThroughput = container.readThroughput().getProperties(); + + // The autoscale max throughput (RU/s) of the resource + int autoscaleMaxThroughput = autoscaleContainerThroughput.getAutoscaleMaxThroughput(); + + // The throughput (RU/s) the resource is currently scaled to + int currentThroughput = autoscaleContainerThroughput.Throughput; + + logger.info("Autoscale max throughput: {} current throughput: {}",autoscaleMaxThroughput,currentThroughput); + } + + // Container read + private void readContainerById() throws Exception { + logger.info("Read container " + containerName + " by ID."); + + // Read container by ID + container = database.getContainer(containerName); + + logger.info("Done."); + } + + // Container read all + private void readAllContainers() throws Exception { + logger.info("Read all containers in database " + databaseName + "."); + + // Read all containers in the account + CosmosPagedIterable containers = database.readAllContainers(new FeedOptions()); + + // Print + String msg="Listing containers in database:\n"; + for(CosmosContainerProperties containerProps : containers) { + msg += String.format("-Container ID: %s\n",containerProps.getId()); + } + logger.info(msg + "\n"); + + logger.info("Done."); + } + + // Container delete + private void deleteAContainer() throws Exception { + logger.info("Delete container " + containerName + " by ID."); + + // Delete container + CosmosContainerResponse containerResp = database.getContainer(containerName).delete(new CosmosContainerRequestOptions()); + logger.info("Status code for container delete: {}",containerResp.getStatusCode()); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: {}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteAContainer(); + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/autoscaledatabasecrud/sync/AutoscaleDatabaseCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/autoscaledatabasecrud/sync/AutoscaleDatabaseCRUDQuickstart.java new file mode 100644 index 0000000..602913d --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/autoscaledatabasecrud/sync/AutoscaleDatabaseCRUDQuickstart.java @@ -0,0 +1,140 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.examples.autoscaledatabasecrud.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.models.CosmosDatabaseProperties; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import com.azure.cosmos.models.FeedOptions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class AutoscaleDatabaseCRUDQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + + private CosmosDatabase database; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to demonstrate the following AUTOSCALE database CRUD operations: + * -Create + * -Read by ID + * -Read all + * -Delete + */ + public static void main(String[] args) { + AutoscaleDatabaseCRUDQuickstart p = new AutoscaleDatabaseCRUDQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.autoscaleDatabaseCRUDDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void autoscaleDatabaseCRUDDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + createDatabaseIfNotExists(); + readDatabaseById(); + readAllDatabases(); + // deleteADatabase() is called at shutdown() + + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Autoscale throughput settings + ThroughputProperties autoscaleThroughputProperties = ThroughputProperties.createAutoscaledThroughput(400); //Set autoscale max RU/s + + //Create the database with autoscale enabled + CosmosDatabase database = client.createDatabase(databaseName, autoscaleThroughputProperties).getDatabase(); + + logger.info("Done."); + } + + // Database read + private void readDatabaseById() throws Exception { + logger.info("Read database " + databaseName + " by ID."); + + // Read database by ID + database = client.getDatabase(databaseName); + + logger.info("Done."); + } + + // Database read all + private void readAllDatabases() throws Exception { + logger.info("Read all databases in the account."); + + // Read all databases in the account + CosmosPagedIterable databases = client.readAllDatabases(new FeedOptions()); + + // Print + String msg="Listing databases in account:\n"; + for(CosmosDatabaseProperties dbProps : databases) { + msg += String.format("-Database ID: %s\n",dbProps.getId()); + } + logger.info(msg + "\n"); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + 
CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: {}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java new file mode 100644 index 0000000..5c07fea --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleChangeFeedProcessor.java @@ -0,0 +1,269 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.cosmos.examples.changefeed; + +import com.azure.cosmos.ChangeFeedProcessor; +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.examples.common.CustomPOJO; +import com.azure.cosmos.implementation.Utils; +import com.azure.cosmos.models.CosmosAsyncContainerResponse; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.commons.lang3.RandomStringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.scheduler.Schedulers; + +import java.time.Duration; +import java.util.List; + +/** + * Sample for Change Feed Processor. + * This sample models an application where documents are being inserted into one container (the "feed container"), + * and meanwhile another worker thread or worker application is pulling inserted documents from the feed container's Change Feed + * and operating on them in some way. For one or more workers to process the Change Feed of a container, the workers must first contact the server + * and "lease" access to monitor one or more partitions of the feed container. The Change Feed Processor Library + * handles leasing automatically for you, however you must create a separate "lease container" where the Change Feed + * Processor Library can store and track leases container partitions. 
+ */ +public class SampleChangeFeedProcessor { + + public static int WAIT_FOR_WORK = 60000; + public static final String DATABASE_NAME = "db_" + RandomStringUtils.randomAlphabetic(7); + public static final String COLLECTION_NAME = "coll_" + RandomStringUtils.randomAlphabetic(7); + private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper(); + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + + private static ChangeFeedProcessor changeFeedProcessorInstance; + private static boolean isWorkCompleted = false; + + public static void main(String[] args) { + logger.info("BEGIN Sample"); + + try { + + //Summary of the next four commands: + //-Create an asynchronous Azure Cosmos DB client and database so that we can issue async requests to the DB + //-Create a "feed container" and a "lease container" in the DB + logger.info("-->CREATE DocumentClient"); + CosmosAsyncClient client = getCosmosClient(); + + logger.info("-->CREATE sample's database: " + DATABASE_NAME); + CosmosAsyncDatabase cosmosDatabase = createNewDatabase(client, DATABASE_NAME); + + logger.info("-->CREATE container for documents: " + COLLECTION_NAME); + CosmosAsyncContainer feedContainer = createNewCollection(client, DATABASE_NAME, COLLECTION_NAME); + + logger.info("-->CREATE container for lease: " + COLLECTION_NAME + "-leases"); + CosmosAsyncContainer leaseContainer = createNewLeaseCollection(client, DATABASE_NAME, COLLECTION_NAME + "-leases"); + + //Model of a worker thread or application which leases access to monitor one or more feed container + //partitions via the Change Feed. In a real-world application you might deploy this code in an Azure function. + //The next line causes the worker to create and start an instance of the Change Feed Processor. See the implementation of getChangeFeedProcessor() for guidance + //on creating a handler for Change Feed events. In this stream, we also trigger the insertion of 10 documents on a separate + //thread. + logger.info("-->START Change Feed Processor on worker (handles changes asynchronously)"); + changeFeedProcessorInstance = getChangeFeedProcessor("SampleHost_1", feedContainer, leaseContainer); + changeFeedProcessorInstance.start() + .subscribeOn(Schedulers.elastic()) + .doOnSuccess(aVoid -> { + //pass + }) + .subscribe(); + + //These two lines model an application which is inserting ten documents into the feed container + logger.info("-->START application that inserts documents into feed container"); + createNewDocumentsCustomPOJO(feedContainer, 10, Duration.ofSeconds(3)); + isWorkCompleted = true; + + //This loop models the Worker main loop, which spins while its Change Feed Processor instance asynchronously + //handles incoming Change Feed events from the feed container. Of course in this sample, polling + //isWorkCompleted is unnecessary because items are being added to the feed container on the same thread, and you + //can see just above isWorkCompleted is set to true. 
+ //But conceptually the worker is part of a different thread or application than the one which is inserting + //into the feed container; so this code illustrates the worker waiting and listening for changes to the feed container + long remainingWork = WAIT_FOR_WORK; + while (!isWorkCompleted && remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + + //When all documents have been processed, clean up + if (isWorkCompleted) { + if (changeFeedProcessorInstance != null) { + changeFeedProcessorInstance.stop().subscribe(); + } + } else { + throw new RuntimeException("The change feed processor initialization and automatic create document feeding process did not complete in the expected time"); + } + + logger.info("-->DELETE sample's database: " + DATABASE_NAME); + deleteDatabase(cosmosDatabase); + + Thread.sleep(500); + + } catch (Exception e) { + e.printStackTrace(); + } + + logger.info("END Sample"); + } + + public static ChangeFeedProcessor getChangeFeedProcessor(String hostName, CosmosAsyncContainer feedContainer, CosmosAsyncContainer leaseContainer) { + return ChangeFeedProcessor.changeFeedProcessorBuilder() + .setHostName(hostName) + .setFeedContainer(feedContainer) + .setLeaseContainer(leaseContainer) + .setHandleChanges((List docs) -> { + logger.info("--->setHandleChanges() START"); + + for (JsonNode document : docs) { + try { + //Change Feed hands the document to you in the form of a JsonNode + //As a developer you have two options for handling the JsonNode document provided to you by Change Feed + //One option is to operate on the document in the form of a JsonNode, as shown below. This is great + //especially if you do not have a single uniform data model for all documents. + logger.info("---->DOCUMENT RECEIVED: " + OBJECT_MAPPER.writerWithDefaultPrettyPrinter() + .writeValueAsString(document)); + + //You can also transform the JsonNode to a POJO having the same structure as the JsonNode, + //as shown below. Then you can operate on the POJO. 
+ CustomPOJO pojo_doc = OBJECT_MAPPER.treeToValue(document, CustomPOJO.class); + logger.info("----=>id: " + pojo_doc.getId()); + + } catch (JsonProcessingException e) { + e.printStackTrace(); + } + } + logger.info("--->handleChanges() END"); + + }) + .build(); + } + + public static CosmosAsyncClient getCosmosClient() { + + return new CosmosClientBuilder() + .setEndpoint(SampleConfigurations.HOST) + .setKey(SampleConfigurations.MASTER_KEY) + .setConnectionPolicy(ConnectionPolicy.getDefaultPolicy()) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildAsyncClient(); + } + + public static CosmosAsyncDatabase createNewDatabase(CosmosAsyncClient client, String databaseName) { + return client.createDatabaseIfNotExists(databaseName).block().getDatabase(); + } + + public static void deleteDatabase(CosmosAsyncDatabase cosmosDatabase) { + cosmosDatabase.delete().block(); + } + + public static CosmosAsyncContainer createNewCollection(CosmosAsyncClient client, String databaseName, String collectionName) { + CosmosAsyncDatabase databaseLink = client.getDatabase(databaseName); + CosmosAsyncContainer collectionLink = databaseLink.getContainer(collectionName); + CosmosAsyncContainerResponse containerResponse = null; + + try { + containerResponse = collectionLink.read().block(); + + if (containerResponse != null) { + throw new IllegalArgumentException(String.format("Collection %s already exists in database %s.", collectionName, databaseName)); + } + } catch (RuntimeException ex) { + if (ex instanceof CosmosClientException) { + CosmosClientException cosmosClientException = (CosmosClientException) ex; + + if (cosmosClientException.getStatusCode() != 404) { + throw ex; + } + } else { + throw ex; + } + } + + CosmosContainerProperties containerSettings = new CosmosContainerProperties(collectionName, "/id"); + CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); + containerResponse = databaseLink.createContainer(containerSettings, 10000, requestOptions).block(); + + if (containerResponse == null) { + throw new RuntimeException(String.format("Failed to create collection %s in database %s.", collectionName, databaseName)); + } + + return containerResponse.getContainer(); + } + + public static CosmosAsyncContainer createNewLeaseCollection(CosmosAsyncClient client, String databaseName, String leaseCollectionName) { + CosmosAsyncDatabase databaseLink = client.getDatabase(databaseName); + CosmosAsyncContainer leaseCollectionLink = databaseLink.getContainer(leaseCollectionName); + CosmosAsyncContainerResponse leaseContainerResponse = null; + + try { + leaseContainerResponse = leaseCollectionLink.read().block(); + + if (leaseContainerResponse != null) { + leaseCollectionLink.delete().block(); + + try { + Thread.sleep(1000); + } catch (InterruptedException ex) { + ex.printStackTrace(); + } + } + } catch (RuntimeException ex) { + if (ex instanceof CosmosClientException) { + CosmosClientException cosmosClientException = (CosmosClientException) ex; + + if (cosmosClientException.getStatusCode() != 404) { + throw ex; + } + } else { + throw ex; + } + } + + CosmosContainerProperties containerSettings = new CosmosContainerProperties(leaseCollectionName, "/id"); + CosmosContainerRequestOptions requestOptions = new CosmosContainerRequestOptions(); + + leaseContainerResponse = databaseLink.createContainer(containerSettings, 400, requestOptions).block(); + + if (leaseContainerResponse == null) { + throw new RuntimeException(String.format("Failed to create collection %s in database %s.", 
leaseCollectionName, databaseName)); + } + + return leaseContainerResponse.getContainer(); + } + + public static void createNewDocumentsCustomPOJO(CosmosAsyncContainer containerClient, int count, Duration delay) { + String suffix = RandomStringUtils.randomAlphabetic(10); + for (int i = 0; i <= count; i++) { + CustomPOJO document = new CustomPOJO(); + document.setId(String.format("0%d-%s", i, suffix)); + + containerClient.createItem(document).subscribe(doc -> { + logger.info("---->DOCUMENT WRITE: " + doc); + }); + + long remainingWork = delay.toMillis(); + try { + while (remainingWork > 0) { + Thread.sleep(100); + remainingWork -= 100; + } + } catch (InterruptedException iex) { + // exception caught + break; + } + } + } +} diff --git a/src/main/java/com/azure/cosmos/examples/changefeed/SampleConfigurations.java b/src/main/java/com/azure/cosmos/examples/changefeed/SampleConfigurations.java new file mode 100644 index 0000000..f373f7e --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/changefeed/SampleConfigurations.java @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.cosmos.examples.changefeed; + +import com.google.common.base.Strings; +import org.apache.commons.lang3.StringUtils; + +/** + * Contains the configurations for tests. + *

+ * For running tests, you can pass a customized endpoint configuration in one of the following ways:
+ * - Pass -DACCOUNT_KEY="[your-key]" -DACCOUNT_HOST="[your-endpoint]" as JVM command-line options.
+ * - Set ACCOUNT_KEY and ACCOUNT_HOST as environment variables.
+ *
+ * If none of the above is set, emulator endpoint will be used. + */ +public final class SampleConfigurations { + // REPLACE MASTER_KEY and HOST with values from your Azure Cosmos DB account. + // The default values are credentials of the local emulator, which are not used in any production environment. + // + public static String MASTER_KEY = + System.getProperty("ACCOUNT_KEY", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("ACCOUNT_KEY")), + "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==")); + + public static String HOST = + System.getProperty("ACCOUNT_HOST", + StringUtils.defaultString(Strings.emptyToNull( + System.getenv().get("ACCOUNT_HOST")), + "https://localhost:8081/")); +} diff --git a/src/main/java/com/azure/cosmos/examples/common/AccountSettings.java b/src/main/java/com/azure/cosmos/examples/common/AccountSettings.java new file mode 100644 index 0000000..64a7776 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/AccountSettings.java @@ -0,0 +1,40 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.common; + +import org.apache.commons.lang3.StringUtils; + +/** + * Contains the account configurations for Sample. + *

+ * For running tests, you can pass a customized endpoint configuration in one of the following ways:
+ * - Pass -DACCOUNT_KEY="[your-key]" -DACCOUNT_HOST="[your-endpoint]" as JVM command-line options.
+ * - Set ACCOUNT_KEY and ACCOUNT_HOST as environment variables.
+ *
+ * If none of the above is set, emulator endpoint will be used. + * Emulator http cert is self signed. If you are using emulator, + * make sure emulator https certificate is imported + * to java trusted cert store: + * https://docs.microsoft.com/en-us/azure/cosmos-db/local-emulator-export-ssl-certificates + */ +public class AccountSettings { + // Replace MASTER_KEY and HOST with values from your Azure Cosmos DB account. + // The default values are credentials of the local emulator, which are not used in any production environment. + // + public static String MASTER_KEY = + System.getProperty("ACCOUNT_KEY", + StringUtils.defaultString(StringUtils.trimToNull( + System.getenv().get("ACCOUNT_KEY")), + "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==")); + + public static String HOST = + System.getProperty("ACCOUNT_HOST", + StringUtils.defaultString(StringUtils.trimToNull( + System.getenv().get("ACCOUNT_HOST")), + "https://localhost:443/")); +} diff --git a/src/main/java/com/azure/cosmos/examples/common/Address.java b/src/main/java/com/azure/cosmos/examples/common/Address.java new file mode 100644 index 0000000..9abbf3f --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Address.java @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.common; + +public class Address { + public String getState() { + return state; + } + + public void setState(String state) { + this.state = state; + } + + public String getCounty() { + return county; + } + + public void setCounty(String county) { + this.county = county; + } + + public String getCity() { + return city; + } + + public void setCity(String city) { + this.city = city; + } + + private String state=""; + private String county=""; + private String city=""; +} + diff --git a/src/main/java/com/azure/cosmos/examples/common/Child.java b/src/main/java/com/azure/cosmos/examples/common/Child.java new file mode 100644 index 0000000..98cdd5c --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Child.java @@ -0,0 +1,53 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.common; + +public class Child { + public String getFamilyName() { + return familyName; + } + + public void setFamilyName(String familyName) { + this.familyName = familyName; + } + + public String getFirstName() { + return firstName; + } + + public void setFirstName(String firstName) { + this.firstName = firstName; + } + + public String getGender() { + return gender; + } + + public void setGender(String gender) { + this.gender = gender; + } + + public int getGrade() { + return grade; + } + + public void setGrade(int grade) { + this.grade = grade; + } + + public Pet[] getPets() { + return pets; + } + + public void setPets(Pet[] pets) { + this.pets = pets; + } + + private String familyName; + private String firstName; + private String gender; + private int grade; + private Pet[] pets; +} + diff --git a/src/main/java/com/azure/cosmos/examples/common/CustomPOJO.java b/src/main/java/com/azure/cosmos/examples/common/CustomPOJO.java new file mode 100644 index 0000000..0341d1a --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/CustomPOJO.java @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
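+// CustomPOJO is intentionally minimal: an id-only item type used by SampleChangeFeedProcessor,
+// whose feed and lease containers are partitioned on /id.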
+ +package com.azure.cosmos.examples.common; + +public class CustomPOJO { + private String id; + + public CustomPOJO() { + + } + + public CustomPOJO(String id) { + this.id = id; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } +} diff --git a/src/main/java/com/azure/cosmos/examples/common/Families.java b/src/main/java/com/azure/cosmos/examples/common/Families.java new file mode 100644 index 0000000..1f658ae --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Families.java @@ -0,0 +1,117 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.common; + +public class Families { + + public static Family getAndersenFamilyItem() { + Family andersenFamily = new Family(); + andersenFamily.setId("Andersen-" + System.currentTimeMillis()); + andersenFamily.setLastName("Andersen"); + + Parent parent1 = new Parent(); + parent1.setFirstName("Thomas"); + + Parent parent2 = new Parent(); + parent2.setFirstName("Mary Kay"); + + andersenFamily.setParents(new Parent[]{parent1, parent2}); + + Child child1 = new Child(); + child1.setFirstName("Henriette Thaulow"); + child1.setGender("female"); + child1.setGrade(5); + + Pet pet1 = new Pet(); + pet1.setGivenName("Fluffy"); + + child1.setPets(new Pet[]{pet1}); + + andersenFamily.setDistrict("WA5"); + Address address = new Address(); + address.setCity("Seattle"); + address.setCounty("King"); + address.setState("WA"); + + andersenFamily.setAddress(address); + andersenFamily.setRegistered(true); + + return andersenFamily; + } + + public static Family getWakefieldFamilyItem() { + Family wakefieldFamily = new Family(); + wakefieldFamily.setId("Wakefield-" + System.currentTimeMillis()); + wakefieldFamily.setLastName("Wakefield"); + + Parent parent1 = new Parent(); + parent1.setFamilyName("Wakefield"); + parent1.setFirstName("Robin"); + + Parent parent2 = new Parent(); + parent2.setFamilyName("Miller"); + parent2.setFirstName("Ben"); + + wakefieldFamily.setParents(new Parent[]{parent1, parent2}); + + Child child1 = new Child(); + child1.setFirstName("Jesse"); + child1.setFamilyName("Merriam"); + child1.setGrade(8); + + Pet pet1 = new Pet(); + pet1.setGivenName("Goofy"); + + Pet pet2 = new Pet(); + pet2.setGivenName("Shadow"); + + child1.setPets(new Pet[]{pet1, pet2}); + + Child child2 = new Child(); + child2.setFirstName("Lisa"); + child2.setFamilyName("Miller"); + child2.setGrade(1); + child2.setGender("female"); + + wakefieldFamily.setChildren(new Child[]{child1, child2}); + + Address address = new Address(); + address.setCity("NY"); + address.setCounty("Manhattan"); + address.setState("NY"); + + wakefieldFamily.setAddress(address); + wakefieldFamily.setDistrict("NY23"); + wakefieldFamily.setRegistered(true); + return wakefieldFamily; + } + + public static Family getJohnsonFamilyItem() { + Family andersenFamily = new Family(); + andersenFamily.setId("Johnson-" + System.currentTimeMillis()); + andersenFamily.setLastName("Johnson"); + + Parent parent1 = new Parent(); + parent1.setFirstName("John"); + + Parent parent2 = new Parent(); + parent2.setFirstName("Lili"); + + return andersenFamily; + } + + public static Family getSmithFamilyItem() { + Family andersenFamily = new Family(); + andersenFamily.setId("Smith-" + System.currentTimeMillis()); + andersenFamily.setLastName("Smith"); + + Parent parent1 = new Parent(); + parent1.setFirstName("John"); + + Parent parent2 = new Parent(); + parent2.setFirstName("Cynthia"); + + 
return andersenFamily; + } +} diff --git a/src/main/java/com/azure/cosmos/examples/common/Family.java b/src/main/java/com/azure/cosmos/examples/common/Family.java new file mode 100644 index 0000000..9a3c389 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Family.java @@ -0,0 +1,74 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.common; + +public class Family { + public Family() { + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getLastName() { + return lastName; + } + + public void setLastName(String lastName) { + this.lastName = lastName; + } + + public String getDistrict() { + return district; + } + + public void setDistrict(String district) { + this.district = district; + } + + public Parent[] getParents() { + return parents; + } + + public void setParents(Parent[] parents) { + this.parents = parents; + } + + public Child[] getChildren() { + return children; + } + + public void setChildren(Child[] children) { + this.children = children; + } + + public Address getAddress() { + return address; + } + + public void setAddress(Address address) { + this.address = address; + } + + public boolean isRegistered() { + return isRegistered; + } + + public void setRegistered(boolean isRegistered) { + this.isRegistered = isRegistered; + } + + private String id=""; + private String lastName=""; + private String district=""; + private Parent[] parents={}; + private Child[] children={}; + private Address address=new Address(); + private boolean isRegistered=false; +} + diff --git a/src/main/java/com/azure/cosmos/examples/common/Parent.java b/src/main/java/com/azure/cosmos/examples/common/Parent.java new file mode 100644 index 0000000..d7753a8 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Parent.java @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.common; + +public class Parent { + + public Parent() { + } + + public Parent(String firstName) { + this.firstName = firstName; + } + + public String getFamilyName() { + return familyName; + } + + public void setFamilyName(String familyName) { + this.familyName = familyName; + } + + public String getFirstName() { + return firstName; + } + + public void setFirstName(String firstName) { + this.firstName = firstName; + } + + private String familyName; + private String firstName; +} diff --git a/src/main/java/com/azure/cosmos/examples/common/Pet.java b/src/main/java/com/azure/cosmos/examples/common/Pet.java new file mode 100644 index 0000000..062ce93 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Pet.java @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.examples.common; + +public class Pet { + public String getGivenName() { + return givenName; + } + + public void setGivenName(String givenName) { + this.givenName = givenName; + } + + private String givenName; +} diff --git a/src/main/java/com/azure/cosmos/examples/common/Profile.java b/src/main/java/com/azure/cosmos/examples/common/Profile.java new file mode 100644 index 0000000..8471fde --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/common/Profile.java @@ -0,0 +1,52 @@ +package com.azure.cosmos.examples.common; + +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.implementation.Utils; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.UUID; + +public class Profile { + + private static long tic_ns = System.nanoTime(); // For execution timing + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + /* tic/toc pair - measure ms execution time between tic() and toc_ms() + Undefined behavior if you you do not pair 'tic()' followed by 'toc_ms()' + */ + public static void tic() {tic_ns = System.nanoTime();} + public static double toc_ms() {return ((double)(System.nanoTime()-tic_ns))/1000000.0;}; + + /* Generate ArrayList of N unique documents (assumes /pk is id) */ + public static ArrayList generateDocs(int N) { + ArrayList docs = new ArrayList(); + ObjectMapper mapper = Utils.getSimpleObjectMapper(); + + try { + for (int i = 1; i <= N; i++) { + docs.add(mapper.readTree( + "{" + + "\"id\": " + + "\"" + UUID.randomUUID().toString() + "\"" + + "}" + )); + + + } + } catch (Exception err) { + logger.error("Failed generating documents: ", err); + } + + return docs; + } + + /* Placeholder for background tasks to run during resource creation */ + public static void doOtherThings() { + // Not much to do right now :) + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/containercrud/sync/ContainerCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/containercrud/sync/ContainerCRUDQuickstart.java new file mode 100644 index 0000000..771679e --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/containercrud/sync/ContainerCRUDQuickstart.java @@ -0,0 +1,181 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
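+// ContainerCRUDQuickstart covers container create, read, update-throughput, and delete using
+// manually provisioned (fixed) RU/s; see the autoscale quickstarts above for the equivalent
+// autoscale throughput flow.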
+ +package com.azure.cosmos.examples.containercrud.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; +import com.azure.cosmos.models.CosmosContainerResponse; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import com.azure.cosmos.models.FeedOptions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ContainerCRUDQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + + private CosmosDatabase database; + private CosmosContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to demonstrate the following container CRUD operations: + * -Create + * -Update throughput + * -Read by ID + * -Read all + * -Delete + */ + public static void main(String[] args) { + ContainerCRUDQuickstart p = new ContainerCRUDQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.containerCRUDDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void containerCRUDDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + createDatabaseIfNotExists(); + createContainerIfNotExists(); + + readContainerById(); + readAllContainers(); + // deleteAContainer() is called at shutdown() + + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Create database if not exists + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + logger.info("Done."); + } + + // Container create + private void createContainerIfNotExists() throws Exception { + logger.info("Create container " + containerName + " if not exists."); + + // Create container if not exists + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/lastName"); + + // Create container with 200 RU/s + container = database.createContainerIfNotExists(containerProperties, 200).getContainer(); + + logger.info("Done."); + } + + // Update container throughput + private void updateContainerThroughput() throws Exception { + logger.info("Update throughput for container " + containerName + "."); + + // Specify new throughput value + container.replaceProvisionedThroughput(400); + + logger.info("Done."); + } + + // Container read + private void readContainerById() throws Exception { + logger.info("Read container " + 
containerName + " by ID."); + + // Read container by ID + container = database.getContainer(containerName); + + logger.info("Done."); + } + + // Container read all + private void readAllContainers() throws Exception { + logger.info("Read all containers in database " + databaseName + "."); + + // Read all containers in the account + CosmosPagedIterable containers = database.readAllContainers(new FeedOptions()); + + // Print + String msg="Listing containers in database:\n"; + for(CosmosContainerProperties containerProps : containers) { + msg += String.format("-Container ID: %s\n",containerProps.getId()); + } + logger.info(msg + "\n"); + + logger.info("Done."); + } + + // Container delete + private void deleteAContainer() throws Exception { + logger.info("Delete container " + containerName + " by ID."); + + // Delete container + CosmosContainerResponse containerResp = database.getContainer(containerName).delete(new CosmosContainerRequestOptions()); + logger.info("Status code for container delete: {}",containerResp.getStatusCode()); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: {}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteAContainer(); + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java new file mode 100644 index 0000000..81298b5 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/async/SampleCRUDQuickstartAsync.java @@ -0,0 +1,372 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.examples.crudquickstart.async; + + +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosPagedFlux; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.Families; +import com.azure.cosmos.examples.common.Family; +import com.azure.cosmos.models.CosmosAsyncContainerResponse; +import com.azure.cosmos.models.CosmosAsyncDatabaseResponse; +import com.azure.cosmos.models.CosmosAsyncItemResponse; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.PartitionKey; +import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.concurrent.CountDownLatch; +import java.util.stream.Collectors; + +public class SampleCRUDQuickstartAsync { + + private CosmosAsyncClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + + private CosmosAsyncDatabase database; + private CosmosAsyncContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Run a Hello CosmosDB console application. + *

+ * This is a simple sample application intended to demonstrate Create, Read, Update, Delete (CRUD) operations + * with Azure Cosmos DB Java SDK, as applied to databases, containers and items. This sample will + * 1. Create asynchronous client, database and container instances + * 2. Create several items + * 3. Upsert one of the items + * 4. Perform a query over the items + * 5. Delete an item + * 6. Delete the Cosmos DB database and container resources and close the client. + */ + //

+ public static void main(String[] args) { + SampleCRUDQuickstartAsync p = new SampleCRUDQuickstartAsync(); + + try { + logger.info("Starting ASYNC main"); + p.getStartedDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + //
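+ // Note: several of the helper methods below (createFamilies, readItems, queryItems) subscribe to a
+ // reactive pipeline and use a CountDownLatch to hold the demo thread until the pipeline completes
+ // or errors, since the subscriptions themselves are non-blocking.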
+ + private void getStartedDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); + // Setting the preferred location to Cosmos DB Account region + // West US is just an example. User should set preferred location to the Cosmos DB region closest to the application + defaultPolicy.setPreferredLocations(Lists.newArrayList("West US")); + + // Create async client + // + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(defaultPolicy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildAsyncClient(); + + // + + createDatabaseIfNotExists(); + createContainerIfNotExists(); + + Family andersenFamilyItem = Families.getAndersenFamilyItem(); + Family wakefieldFamilyItem = Families.getWakefieldFamilyItem(); + Family johnsonFamilyItem = Families.getJohnsonFamilyItem(); + Family smithFamilyItem = Families.getSmithFamilyItem(); + + // Setup family items to create + Flux familiesToCreate = Flux.just(andersenFamilyItem, + wakefieldFamilyItem, + johnsonFamilyItem, + smithFamilyItem); + + // Creates several items in the container + createFamilies(familiesToCreate); + + // Upsert one of the items in the container + upsertFamily(wakefieldFamilyItem); + + familiesToCreate = Flux.just(andersenFamilyItem, + wakefieldFamilyItem, + johnsonFamilyItem, + smithFamilyItem); + + logger.info("Reading items."); + readItems(familiesToCreate); + + logger.info("Querying items."); + queryItems(); + + logger.info("Deleting an item."); + deleteItem(andersenFamilyItem); + } + + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists."); + + // Create database if not exists + // + Mono databaseIfNotExists = client.createDatabaseIfNotExists(databaseName); + databaseIfNotExists.flatMap(databaseResponse -> { + database = databaseResponse.getDatabase(); + logger.info("Checking database " + database.getId() + " completed!\n"); + return Mono.empty(); + }).block(); + // + } + + private void createContainerIfNotExists() throws Exception { + logger.info("Create container " + containerName + " if not exists."); + + // Create container if not exists + // + + CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerName, "/lastName"); + Mono containerIfNotExists = database.createContainerIfNotExists(containerProperties, 400); + + // Create container with 400 RU/s + CosmosAsyncContainerResponse cosmosContainerResponse = containerIfNotExists.block(); + container = cosmosContainerResponse.getContainer(); + // + + //Modify existing container + containerProperties = cosmosContainerResponse.getProperties(); + Mono propertiesReplace = container.replace(containerProperties, new CosmosContainerRequestOptions()); + propertiesReplace.flatMap(containerResponse -> { + logger.info("setupContainer(): Container " + container.getId() + " in " + database.getId() + + "has been updated with it's new properties."); + return Mono.empty(); + }).onErrorResume((exception) -> { + logger.error("setupContainer(): Unable to update properties for container " + container.getId() + + " in database " + database.getId() + + ". 
e: " + exception.getLocalizedMessage()); + return Mono.empty(); + }).block(); + + } + + private void createFamilies(Flux families) throws Exception { + + // + + final CountDownLatch completionLatch = new CountDownLatch(1); + + // Combine multiple item inserts, associated success println's, and a final aggregate stats println into one Reactive stream. + families.flatMap(family -> { + return container.createItem(family); + }) //Flux of item request responses + .flatMap(itemResponse -> { + logger.info(String.format("Created item with request charge of %.2f within" + + " duration %s", + itemResponse.getRequestCharge(), itemResponse.getRequestLatency())); + logger.info(String.format("Item ID: %s\n", itemResponse.getItem().getId())); + return Mono.just(itemResponse.getRequestCharge()); + }) //Flux of request charges + .reduce(0.0, + (charge_n, charge_nplus1) -> charge_n + charge_nplus1 + ) //Mono of total charge - there will be only one item in this stream + .subscribe(charge -> { + logger.info(String.format("Created items with total request charge of %.2f\n", + charge)); + }, + err -> { + if (err instanceof CosmosClientException) { + //Client-specific errors + CosmosClientException cerr = (CosmosClientException) err; + cerr.printStackTrace(); + logger.error(String.format("Read Item failed with %s\n", cerr)); + } else { + //General errors + err.printStackTrace(); + } + + completionLatch.countDown(); + }, + () -> { + completionLatch.countDown(); + } + ); //Preserve the total charge and print aggregate charge/item count stats. + + try { + completionLatch.await(); + } catch (InterruptedException err) { + throw new AssertionError("Unexpected Interruption", err); + } + + // + } + + private void upsertFamily(Family family_to_upsert) { + //Modify a field of the family object + logger.info(String.format("Upserting the item with id %s after modifying the isRegistered field...", family_to_upsert.getId())); + family_to_upsert.setRegistered(!family_to_upsert.isRegistered()); + + //Upsert the modified item + Mono.just(family_to_upsert).flatMap(item -> { + CosmosAsyncItemResponse item_resp = container.upsertItem(family_to_upsert).block(); + + // Get upsert request charge and other properties like latency, and diagnostics strings, etc. + logger.info(String.format("Upserted item with request charge of %.2f within duration %s", + item_resp.getRequestCharge(), item_resp.getRequestLatency())); + + return Mono.empty(); + }).subscribe(); + } + + private void readItems(Flux familiesToCreate) { + // Using partition key for point read scenarios. 
+ // This will help fast look up of items because of partition key + // + + final CountDownLatch completionLatch = new CountDownLatch(1); + + familiesToCreate.flatMap(family -> { + Mono> asyncItemResponseMono = container.readItem(family.getId(), new PartitionKey(family.getLastName()), Family.class); + return asyncItemResponseMono; + }) + .subscribe( + itemResponse -> { + double requestCharge = itemResponse.getRequestCharge(); + Duration requestLatency = itemResponse.getRequestLatency(); + logger.info(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", + itemResponse.getItem().getId(), requestCharge, requestLatency)); + }, + err -> { + if (err instanceof CosmosClientException) { + //Client-specific errors + CosmosClientException cerr = (CosmosClientException) err; + cerr.printStackTrace(); + logger.error(String.format("Read Item failed with %s\n", cerr)); + } else { + //General errors + err.printStackTrace(); + } + + completionLatch.countDown(); + }, + () -> { + completionLatch.countDown(); + } + ); + + try { + completionLatch.await(); + } catch (InterruptedException err) { + throw new AssertionError("Unexpected Interruption", err); + } + + // + } + + private void queryItems() { + // + // Set some common query options + + FeedOptions queryOptions = new FeedOptions(); + queryOptions.setMaxItemCount(10); + //queryOptions.setEnableCrossPartitionQuery(true); //No longer needed in SDK v4 + // Set populate query metrics to get metrics around query executions + queryOptions.setPopulateQueryMetrics(true); + + CosmosPagedFlux pagedFluxResponse = container.queryItems( + "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); + + final CountDownLatch completionLatch = new CountDownLatch(1); + + pagedFluxResponse.byPage().subscribe( + fluxResponse -> { + logger.info("Got a page of query result with " + + fluxResponse.getResults().size() + " items(s)" + + " and request charge of " + fluxResponse.getRequestCharge()); + + logger.info("Item Ids " + fluxResponse + .getResults() + .stream() + .map(Family::getId) + .collect(Collectors.toList())); + }, + err -> { + if (err instanceof CosmosClientException) { + //Client-specific errors + CosmosClientException cerr = (CosmosClientException) err; + cerr.printStackTrace(); + logger.error(String.format("Read Item failed with %s\n", cerr)); + } else { + //General errors + err.printStackTrace(); + } + + completionLatch.countDown(); + }, + () -> { + completionLatch.countDown(); + } + ); + + try { + completionLatch.await(); + } catch (InterruptedException err) { + throw new AssertionError("Unexpected Interruption", err); + } + + // + } + + private void deleteItem(Family item) { + container.deleteItem(item.getId(), new PartitionKey(item.getLastName())).block(); + } + + private void shutdown() { + try { + //Clean shutdown + logger.info("Deleting Cosmos DB resources"); + logger.info("-Deleting container..."); + if (container != null) + container.delete().subscribe(); + logger.info("-Deleting database..."); + if (database != null) + database.delete().subscribe(); + logger.info("-Closing the client..."); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done."); + } +} + diff --git a/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java new file mode 100644 index 0000000..0659d25 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/crudquickstart/sync/SampleCRUDQuickstart.java @@ -0,0 +1,249 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.crudquickstart.sync; + +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.Families; +import com.azure.cosmos.examples.common.Family; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosItemRequestOptions; +import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.PartitionKey; +import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +public class SampleCRUDQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + + private CosmosDatabase database; + private CosmosContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Run a Hello CosmosDB console application. + *

+ * This is a simple sample application intended to demonstrate Create, Read, Update, Delete (CRUD) operations + * with Azure Cosmos DB Java SDK, as applied to databases, containers and items. This sample will + * 1. Create synchronous client, database and container instances + * 2. Create several items + * 3. Upsert one of the items + * 4. Perform a query over the items + * 5. Delete an item + * 6. Delete the Cosmos DB database and container resources and close the client. + */ + //

+ public static void main(String[] args) { + SampleCRUDQuickstart p = new SampleCRUDQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.getStartedDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + //
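+ // Note: AccountSettings (com.azure.cosmos.examples.common) resolves ACCOUNT_HOST and ACCOUNT_KEY
+ // from JVM system properties or environment variables, falling back to the local emulator defaults,
+ // e.g. (hypothetical values) -DACCOUNT_HOST="https://<your-account>.documents.azure.com:443/"
+ // -DACCOUNT_KEY="<your-key>". The container created below is partitioned on /lastName, which is
+ // why reads and deletes pass new PartitionKey(family.getLastName()).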
+ + private void getStartedDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); + // Setting the preferred location to Cosmos DB Account region + // West US is just an example. User should set preferred location to the Cosmos DB region closest to the application + defaultPolicy.setPreferredLocations(Lists.newArrayList("West US")); + + // Create sync client + // + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(defaultPolicy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + // + + createDatabaseIfNotExists(); + createContainerIfNotExists(); + + // Setup family items to create + ArrayList familiesToCreate = new ArrayList<>(); + familiesToCreate.add(Families.getAndersenFamilyItem()); + familiesToCreate.add(Families.getWakefieldFamilyItem()); + familiesToCreate.add(Families.getJohnsonFamilyItem()); + familiesToCreate.add(Families.getSmithFamilyItem()); + + // Creates several items in the container + // Also applies an upsert operation to one of the items (create if not present, otherwise replace) + createFamilies(familiesToCreate); + + logger.info("Reading items."); + readItems(familiesToCreate); + + logger.info("Querying items."); + queryItems(); + + logger.info("Delete an item."); + deleteItem(familiesToCreate.get(0)); + } + + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists."); + + // Create database if not exists + // + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + // + + logger.info("Checking database " + database.getId() + " completed!\n"); + } + + private void createContainerIfNotExists() throws Exception { + logger.info("Create container " + containerName + " if not exists."); + + // Create container if not exists + // + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/lastName"); + + // Create container with 400 RU/s + container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); + // + + logger.info("Checking container " + container.getId() + " completed!\n"); + } + + private void createFamilies(List families) throws Exception { + double totalRequestCharge = 0; + for (Family family : families) { + + // + // Create item using container that we created using sync client + + // Use lastName as partitionKey for cosmos item + // Using appropriate partition key improves the performance of database operations + CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); + CosmosItemResponse item = container.createItem(family, new PartitionKey(family.getLastName()), cosmosItemRequestOptions); + // + + // Get request charge and other properties like latency, and diagnostics strings, etc. 
+ logger.info(String.format("Created item with request charge of %.2f within duration %s", + item.getRequestCharge(), item.getRequestLatency())); + + totalRequestCharge += item.getRequestCharge(); + } + logger.info(String.format("Created %d items with total request charge of %.2f", + families.size(), totalRequestCharge)); + + Family family_to_upsert = families.get(0); + logger.info(String.format("Upserting the item with id %s after modifying the isRegistered field...", family_to_upsert.getId())); + family_to_upsert.setRegistered(!family_to_upsert.isRegistered()); + + CosmosItemResponse item = container.upsertItem(family_to_upsert); + + // Get upsert request charge and other properties like latency, and diagnostics strings, etc. + logger.info(String.format("Upserted item with request charge of %.2f within duration %s", + item.getRequestCharge(), item.getRequestLatency())); + } + + private void readItems(ArrayList familiesToCreate) { + // Using partition key for point read scenarios. + // This will help fast look up of items because of partition key + familiesToCreate.forEach(family -> { + // + try { + CosmosItemResponse item = container.readItem(family.getId(), new PartitionKey(family.getLastName()), Family.class); + double requestCharge = item.getRequestCharge(); + Duration requestLatency = item.getRequestLatency(); + logger.info(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", + item.getResource().getId(), requestCharge, requestLatency)); + } catch (CosmosClientException e) { + e.printStackTrace(); + logger.info(String.format("Read Item failed with %s", e)); + } + // + }); + } + + private void queryItems() { + // + // Set some common query options + FeedOptions queryOptions = new FeedOptions(); + queryOptions.setMaxItemCount(10); + //queryOptions.setEnableCrossPartitionQuery(true); //No longer necessary in SDK v4 + // Set populate query metrics to get metrics around query executions + queryOptions.setPopulateQueryMetrics(true); + + CosmosPagedIterable familiesPagedIterable = container.queryItems( + "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); + + familiesPagedIterable.iterableByPage().forEach(cosmosItemPropertiesFeedResponse -> { + logger.info("Got a page of query result with " + + cosmosItemPropertiesFeedResponse.getResults().size() + " items(s)" + + " and request charge of " + cosmosItemPropertiesFeedResponse.getRequestCharge()); + + logger.info("Item Ids " + cosmosItemPropertiesFeedResponse + .getResults() + .stream() + .map(Family::getId) + .collect(Collectors.toList())); + }); + // + } + + private void deleteItem(Family item) { + container.deleteItem(item.getId(), new PartitionKey(item.getLastName()), new CosmosItemRequestOptions()); + } + + private void shutdown() { + try { + //Clean shutdown + logger.info("Deleting Cosmos DB resources"); + logger.info("-Deleting container..."); + if (container != null) + container.delete(); + logger.info("-Deleting database..."); + if (database != null) + database.delete(); + logger.info("-Closing the client..."); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done."); + } +} diff --git a/src/main/java/com/azure/cosmos/examples/databasecrud/sync/DatabaseCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/databasecrud/sync/DatabaseCRUDQuickstart.java new file mode 100644 index 0000000..1a34cac --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/databasecrud/sync/DatabaseCRUDQuickstart.java @@ -0,0 +1,137 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.databasecrud.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.models.CosmosDatabaseProperties; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import com.azure.cosmos.models.FeedOptions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DatabaseCRUDQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + + private CosmosDatabase database; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to demonstrate the following database CRUD operations: + * -Create + * -Read by ID + * -Read all + * -Delete + */ + public static void main(String[] args) { + DatabaseCRUDQuickstart p = new DatabaseCRUDQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.databaseCRUDDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void databaseCRUDDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + createDatabaseIfNotExists(); + readDatabaseById(); + readAllDatabases(); + // deleteADatabase() is called at shutdown() + + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Create database if not exists + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + logger.info("Done."); + } + + // Database read + private void readDatabaseById() throws Exception { + logger.info("Read database " + databaseName + " by ID."); + + // Read database by ID + database = client.getDatabase(databaseName); + + logger.info("Done."); + } + + // Database read all + private void readAllDatabases() throws Exception { + logger.info("Read all databases in the account."); + + // Read all databases in the account + CosmosPagedIterable databases = client.readAllDatabases(new FeedOptions()); + + // Print + String msg="Listing databases in account:\n"; + for(CosmosDatabaseProperties dbProps : databases) { + msg += String.format("-Database 
ID: %s\n",dbProps.getId()); + } + logger.info(msg + "\n"); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: {}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstart.java b/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstart.java new file mode 100644 index 0000000..768956d --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/documentcrud/sync/DocumentCRUDQuickstart.java @@ -0,0 +1,337 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.documentcrud.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.Family; +import com.azure.cosmos.models.AccessCondition; +import com.azure.cosmos.models.AccessConditionType; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import com.azure.cosmos.models.CosmosItemRequestOptions; +import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.PartitionKey; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.UUID; + +public class DocumentCRUDQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + private final String documentId = UUID.randomUUID().toString(); + private final String documentLastName = "Witherspoon"; + + private CosmosDatabase database; + private CosmosContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to demonstrate the following document CRUD operations: + * -Create + * -Read by ID + * -Read all + * -Query + * -Replace + * -Upsert + * -Replace with conditional ETag check + * -Read document only if document has changed + * -Delete + */ + public static void main(String[] args) { + DocumentCRUDQuickstart p = new DocumentCRUDQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.documentCRUDDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", 
e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void documentCRUDDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + createDatabaseIfNotExists(); + createContainerIfNotExists(); + + createDocument(); + readDocumentById(); + readAllDocumentsInContainer(); + queryDocuments(); + replaceDocument(); + upsertDocument(); + replaceDocumentWithConditionalEtagCheck(); + readDocumentOnlyIfChanged(); + // deleteDocument() is called at shutdown() + + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Create database if not exists + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + logger.info("Done."); + } + + // Container create + private void createContainerIfNotExists() throws Exception { + logger.info("Create container " + containerName + " if not exists."); + + // Create container if not exists + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/lastName"); + + // Create container with 200 RU/s + container = database.createContainerIfNotExists(containerProperties, 200).getContainer(); + + logger.info("Done."); + } + + private void createDocument() throws Exception { + logger.info("Create document " + documentId); + + // Define a document as a POJO (internally this + // is converted to JSON via custom serialization) + Family family = new Family(); + family.setLastName(documentLastName); + family.setId(documentId); + + // Insert this item as a document + // Explicitly specifying the /pk value improves performance. 
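+        // The PartitionKey value passed below must match the document's own partition key property
+        // ("/lastName" for this container); a mismatched value is rejected by the service.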
+ container.createItem(family,new PartitionKey(family.getLastName()),new CosmosItemRequestOptions()); + + logger.info("Done."); + } + + // Document read + private void readDocumentById() throws Exception { + logger.info("Read document " + documentId + " by ID."); + + // Read document by ID + Family family = container.readItem(documentId,new PartitionKey(documentLastName),Family.class).getResource(); + + // Check result + logger.info("Finished reading family " + family.getId() + " with partition key " + family.getLastName()); + + logger.info("Done."); + } + + // Container read all + private void readAllDocumentsInContainer() throws Exception { + logger.info("Read all documents in container " + containerName + "."); + + // Read all documents in the container + CosmosPagedIterable families = container.readAllItems(new FeedOptions(),Family.class); + + // Print + String msg="Listing documents in container:\n"; + for(Family family : families) { + msg += String.format("-Family (/id,partition key)): (%s,%s)\n",family.getId(),family.getLastName()); + } + logger.info(msg + "\n"); + + logger.info("Done."); + } + + private void queryDocuments() throws Exception { + logger.info("Query documents in the container " + containerName + "."); + + String sql = "SELECT * FROM c WHERE c.lastName = 'Witherspoon'"; + + CosmosPagedIterable filteredFamilies = container.queryItems(sql, new FeedOptions(), Family.class); + + // Print + if (filteredFamilies.iterator().hasNext()) { + Family family = filteredFamilies.iterator().next(); + logger.info("First query result: Family with (/id, partition key) = (%s,%s)",family.getId(),family.getLastName()); + } + + logger.info("Done."); + } + + private void replaceDocument() throws Exception { + logger.info("Replace document " + documentId); + + // Replace existing document with new modified document + Family family = new Family(); + family.setLastName(documentLastName); + family.setId(documentId); + family.setDistrict("Columbia"); // Document modification + + CosmosItemResponse famResp = + container.replaceItem(family, family.getId(), new PartitionKey(family.getLastName()), new CosmosItemRequestOptions()); + + logger.info("Request charge of replace operation: {} RU", famResp.getRequestCharge()); + + logger.info("Done."); + } + + private void upsertDocument() throws Exception { + logger.info("Replace document " + documentId); + + // Replace existing document with new modified document (contingent on modification). 
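+        // Upsert semantics: the item is created if no item with this id exists in the partition,
+        // and replaced otherwise; unlike replaceItem, upsertItem does not fail when the item is missing.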
+ Family family = new Family(); + family.setLastName(documentLastName); + family.setId(documentId); + family.setDistrict("Columbia"); // Document modification + + CosmosItemResponse famResp = + container.upsertItem(family, new CosmosItemRequestOptions()); + + logger.info("Done."); + } + + private void replaceDocumentWithConditionalEtagCheck() throws Exception { + logger.info("Replace document " + documentId + ", employing optimistic concurrency using ETag."); + + // Obtained current document ETag + CosmosItemResponse famResp = + container.readItem(documentId, new PartitionKey(documentLastName), Family.class); + String etag = famResp.getResponseHeaders().get("etag"); + + logger.info("Read document " + documentId + " to obtain current ETag: " + etag); + + // Modify document + Family family = famResp.getResource(); + family.setRegistered(!family.isRegistered()); + + // Persist the change back to the server, updating the ETag in the process + // This models a concurrent change made to the document + CosmosItemResponse updatedFamResp = + container.replaceItem(family,family.getId(),new PartitionKey(family.getLastName()),new CosmosItemRequestOptions()); + logger.info("'Concurrent' update to document " + documentId + " so ETag is now " + updatedFamResp.getResponseHeaders().get("etag")); + + // Now update the document and call replace with the AccessCondition requiring that ETag has not changed. + // This should fail because the "concurrent" document change updated the ETag. + try { + AccessCondition ac = new AccessCondition(); + ac.setType(AccessConditionType.IF_MATCH); + ac.setCondition(etag); + + CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); + requestOptions.setAccessCondition(ac); + + family.setDistrict("Seafood"); + + CosmosItemResponse failedFamResp = + container.replaceItem(family,family.getId(),new PartitionKey(family.getLastName()),requestOptions); + + } catch (CosmosClientException cce) { + logger.info("As expected, we have a pre-condition failure exception\n"); + } + + logger.info("Done."); + } + + private void readDocumentOnlyIfChanged() throws Exception { + logger.info("Read document " + documentId + " only if it has been changed, utilizing an ETag check."); + + // Read document + CosmosItemResponse famResp = + container.readItem(documentId, new PartitionKey(documentLastName), Family.class); + logger.info("Read doc with status code of {}", famResp.getStatusCode()); + + // Re-read but with conditional access requirement that ETag has changed. + // This should fail. + + String etag = famResp.getResponseHeaders().get("etag"); + AccessCondition ac = new AccessCondition(); + ac.setType(AccessConditionType.IF_NONE_MATCH); + ac.setCondition(etag); + CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); + requestOptions.setAccessCondition(ac); + + CosmosItemResponse failResp = + container.readItem(documentId, new PartitionKey(documentLastName), requestOptions, Family.class); + + logger.info("Re-read doc with status code of {} (we anticipate failure due to ETag not having changed.)", failResp.getStatusCode()); + + // Replace the doc with a modified version, which will update ETag + Family family = famResp.getResource(); + family.setRegistered(!family.isRegistered()); + CosmosItemResponse failedFamResp = + container.replaceItem(family,family.getId(),new PartitionKey(family.getLastName()),new CosmosItemRequestOptions()); + logger.info("Modified and replaced the doc (updates ETag.)"); + + // Re-read doc again, with conditional acccess requirements. 
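+        // (IF_NONE_MATCH is the "give me the document only if it changed" pattern: the service returns
+        // the document when the stored ETag differs from the supplied one, and a 304 Not Modified
+        // response with no body when it does not.)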
+ // This should succeed since ETag has been updated. + CosmosItemResponse succeedResp = + container.readItem(documentId, new PartitionKey(documentLastName), requestOptions, Family.class); + logger.info("Re-read doc with status code of {} (we anticipate success due to ETag modification.)", succeedResp.getStatusCode()); + + logger.info("Done."); + } + + // Document delete + private void deleteADocument() throws Exception { + logger.info("Delete document " + documentId + " by ID."); + + // Delete document + container.deleteItem(documentId, new PartitionKey(documentLastName), new CosmosItemRequestOptions()); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: {}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteADocument(); + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java new file mode 100644 index 0000000..34628ff --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/async/SampleIndexManagementAsync.java @@ -0,0 +1,389 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.examples.indexmanagement.async; + +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosPagedFlux; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.Families; +import com.azure.cosmos.examples.common.Family; +import com.azure.cosmos.models.CosmosAsyncContainerResponse; +import com.azure.cosmos.models.CosmosAsyncDatabaseResponse; +import com.azure.cosmos.models.CosmosAsyncItemResponse; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.ExcludedPath; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.IncludedPath; +import com.azure.cosmos.models.IndexingMode; +import com.azure.cosmos.models.IndexingPolicy; +import com.azure.cosmos.models.PartitionKey; +import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.stream.Collectors; + +public class SampleIndexManagementAsync { + + private CosmosAsyncClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + + private CosmosAsyncDatabase database; + private CosmosAsyncContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Run a Hello CosmosDB console application. + *

+ * This sample is similar to SampleCRUDQuickstartAsync, but modified to show the indexing capabilities of Cosmos DB. + * See the implementation of createContainerIfNotExistsWithSpecifiedIndex() for a demonstration of how a custom + * indexing policy is defined and applied. + */ + //
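+    // The container created below uses a custom IndexingPolicy: CONSISTENT indexing mode, all paths
+    // ("/*") included and "/name/*" excluded; spatial and composite index configuration is shown in
+    // commented-out form inside createContainerIfNotExistsWithSpecifiedIndex().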

+ public static void main(String[] args) { + SampleIndexManagementAsync p = new SampleIndexManagementAsync(); + + try { + logger.info("Starting ASYNC main"); + p.indexManagementDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + //
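+    // This async sample drives its reactive pipelines to completion explicitly: resource creation
+    // uses block(), while item creation, reads and queries use a CountDownLatch in the subscribe()
+    // callbacks, so the console application does not exit before the asynchronous work finishes.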
+ + private void indexManagementDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); + // Setting the preferred location to Cosmos DB Account region + // West US is just an example. User should set preferred location to the Cosmos DB region closest to the application + defaultPolicy.setPreferredLocations(Lists.newArrayList("West US")); + + // Create async client + // + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(defaultPolicy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildAsyncClient(); + + // + + createDatabaseIfNotExists(); + + //Here is where index management is performed + createContainerIfNotExistsWithSpecifiedIndex(); + + Family andersenFamilyItem = Families.getAndersenFamilyItem(); + Family wakefieldFamilyItem = Families.getWakefieldFamilyItem(); + Family johnsonFamilyItem = Families.getJohnsonFamilyItem(); + Family smithFamilyItem = Families.getSmithFamilyItem(); + + // Setup family items to create + Flux familiesToCreate = Flux.just(andersenFamilyItem, + wakefieldFamilyItem, + johnsonFamilyItem, + smithFamilyItem); + + createFamilies(familiesToCreate); + + familiesToCreate = Flux.just(andersenFamilyItem, + wakefieldFamilyItem, + johnsonFamilyItem, + smithFamilyItem); + + logger.info("Reading items."); + readItems(familiesToCreate); + + logger.info("Querying items."); + queryItems(); + } + + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists."); + + // Create database if not exists + // + Mono databaseIfNotExists = client.createDatabaseIfNotExists(databaseName); + databaseIfNotExists.flatMap(databaseResponse -> { + database = databaseResponse.getDatabase(); + logger.info("Checking database " + database.getId() + " completed!\n"); + return Mono.empty(); + }).block(); + // + } + + private void createContainerIfNotExistsWithSpecifiedIndex() throws Exception { + logger.info("Create container " + containerName + " if not exists."); + + // Create container if not exists + // + + CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerName, "/lastName"); + + // + IndexingPolicy indexingPolicy = new IndexingPolicy(); + indexingPolicy.setIndexingMode(IndexingMode.CONSISTENT); //To turn indexing off set IndexingMode.NONE + + // Included paths + List includedPaths = new ArrayList<>(); + IncludedPath includedPath = new IncludedPath(); + includedPath.setPath("/*"); + includedPaths.add(includedPath); + indexingPolicy.setIncludedPaths(includedPaths); + + // Excluded paths + List excludedPaths = new ArrayList<>(); + ExcludedPath excludedPath = new ExcludedPath(); + excludedPath.setPath("/name/*"); + excludedPaths.add(excludedPath); + indexingPolicy.setExcludedPaths(excludedPaths); + + // Spatial indices - if you need them, here is how to set them up: + /* + List spatialIndexes = new ArrayList(); + List collectionOfSpatialTypes = new ArrayList(); + + SpatialSpec spec = new SpatialSpec(); + spec.setPath("/locations/*"); + collectionOfSpatialTypes.add(SpatialType.Point); + spec.setSpatialTypes(collectionOfSpatialTypes); + spatialIndexes.add(spec); + + indexingPolicy.setSpatialIndexes(spatialIndexes); + */ + + // Composite indices - if you need them, here is how to set them up: + /* + List> compositeIndexes = new ArrayList<>(); + List compositePaths = new ArrayList<>(); + + 
CompositePath nameCompositePath = new CompositePath(); + nameCompositePath.setPath("/name"); + nameCompositePath.setOrder(CompositePathSortOrder.ASCENDING); + + CompositePath ageCompositePath = new CompositePath(); + ageCompositePath.setPath("/age"); + ageCompositePath.setOrder(CompositePathSortOrder.DESCENDING); + + compositePaths.add(ageCompositePath); + compositePaths.add(nameCompositePath); + + compositeIndexes.add(compositePaths); + indexingPolicy.setCompositeIndexes(compositeIndexes); + */ + + containerProperties.setIndexingPolicy(indexingPolicy); + + // + + Mono containerIfNotExists = database.createContainerIfNotExists(containerProperties, 400); + + // Create container with 400 RU/s + containerIfNotExists.flatMap(containerResponse -> { + container = containerResponse.getContainer(); + logger.info("Checking container " + container.getId() + " completed!\n"); + return Mono.empty(); + }).block(); + + // + } + + private void createFamilies(Flux families) throws Exception { + + // + + final CountDownLatch completionLatch = new CountDownLatch(1); + + // Combine multiple item inserts, associated success println's, and a final aggregate stats println into one Reactive stream. + families.flatMap(family -> { + return container.createItem(family); + }) //Flux of item request responses + .flatMap(itemResponse -> { + logger.info(String.format("Created item with request charge of %.2f within" + + " duration %s", + itemResponse.getRequestCharge(), itemResponse.getRequestLatency())); + logger.info(String.format("Item ID: %s\n", itemResponse.getItem().getId())); + return Mono.just(itemResponse.getRequestCharge()); + }) //Flux of request charges + .reduce(0.0, + (charge_n, charge_nplus1) -> charge_n + charge_nplus1 + ) //Mono of total charge - there will be only one item in this stream + .subscribe(charge -> { + logger.info(String.format("Created items with total request charge of %.2f\n", + charge)); + }, + err -> { + if (err instanceof CosmosClientException) { + //Client-specific errors + CosmosClientException cerr = (CosmosClientException) err; + cerr.printStackTrace(); + logger.info(String.format("Read Item failed with %s\n", cerr)); + } else { + //General errors + err.printStackTrace(); + } + + completionLatch.countDown(); + }, + () -> { + completionLatch.countDown(); + } + ); //Preserve the total charge and print aggregate charge/item count stats. + + try { + completionLatch.await(); + } catch (InterruptedException err) { + throw new AssertionError("Unexpected Interruption", err); + } + + // + } + + private void readItems(Flux familiesToCreate) { + // Using partition key for point read scenarios. 
+ // This will help fast look up of items because of partition key + // + + final CountDownLatch completionLatch = new CountDownLatch(1); + + familiesToCreate.flatMap(family -> { + Mono> asyncItemResponseMono = container.readItem(family.getId(), new PartitionKey(family.getLastName()), Family.class); + return asyncItemResponseMono; + }) + .subscribe( + itemResponse -> { + double requestCharge = itemResponse.getRequestCharge(); + Duration requestLatency = itemResponse.getRequestLatency(); + logger.info(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", + itemResponse.getItem().getId(), requestCharge, requestLatency)); + }, + err -> { + if (err instanceof CosmosClientException) { + //Client-specific errors + CosmosClientException cerr = (CosmosClientException) err; + cerr.printStackTrace(); + logger.info(String.format("Read Item failed with %s\n", cerr)); + } else { + //General errors + err.printStackTrace(); + } + + completionLatch.countDown(); + }, + () -> { + completionLatch.countDown(); + } + ); + + try { + completionLatch.await(); + } catch (InterruptedException err) { + throw new AssertionError("Unexpected Interruption", err); + } + + // + } + + private void queryItems() { + // + // Set some common query options + + FeedOptions queryOptions = new FeedOptions(); + queryOptions.setMaxItemCount(10); + // Set populate query metrics to get metrics around query executions + queryOptions.setPopulateQueryMetrics(true); + + CosmosPagedFlux pagedFluxResponse = container.queryItems( + "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); + + final CountDownLatch completionLatch = new CountDownLatch(1); + + pagedFluxResponse.byPage().subscribe( + fluxResponse -> { + logger.info("Got a page of query result with " + + fluxResponse.getResults().size() + " items(s)" + + " and request charge of " + fluxResponse.getRequestCharge()); + + logger.info("Item Ids " + fluxResponse + .getResults() + .stream() + .map(Family::getId) + .collect(Collectors.toList())); + }, + err -> { + if (err instanceof CosmosClientException) { + //Client-specific errors + CosmosClientException cerr = (CosmosClientException) err; + cerr.printStackTrace(); + logger.error(String.format("Read Item failed with %s\n", cerr)); + } else { + //General errors + err.printStackTrace(); + } + + completionLatch.countDown(); + }, + () -> { + completionLatch.countDown(); + } + ); + + try { + completionLatch.await(); + } catch (InterruptedException err) { + throw new AssertionError("Unexpected Interruption", err); + } + + // + } + + private void shutdown() { + try { + //Clean shutdown + logger.info("Deleting Cosmos DB resources"); + logger.info("-Deleting container..."); + if (container != null) + container.delete().subscribe(); + logger.info("-Deleting database..."); + if (database != null) + database.delete().subscribe(); + logger.info("-Closing the client..."); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done."); + } +} diff --git a/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java new file mode 100644 index 0000000..748dbfe --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/indexmanagement/sync/SampleIndexManagement.java @@ -0,0 +1,287 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.indexmanagement.sync; + +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.Families; +import com.azure.cosmos.examples.common.Family; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosItemRequestOptions; +import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.ExcludedPath; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.IncludedPath; +import com.azure.cosmos.models.IndexingMode; +import com.azure.cosmos.models.IndexingPolicy; +import com.azure.cosmos.models.PartitionKey; +import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +public class SampleIndexManagement { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + + private CosmosDatabase database; + private CosmosContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Run a Hello CosmosDB console application. + *

+ * This sample is similar to SampleCRUDQuickstart, but modified to show the indexing capabilities of Cosmos DB. + * See the implementation of createContainerIfNotExistsWithSpecifiedIndex() for a demonstration of how a custom + * indexing policy is defined and applied. + */ + //
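+    // Note: the indexing policy here is applied at container creation time; Cosmos DB also supports
+    // changing the policy on an existing container (the index is then rebuilt in the background),
+    // which this sample does not demonstrate.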

+ public static void main(String[] args) { + + SampleIndexManagement p = new SampleIndexManagement(); + + try { + logger.info("Starting SYNC main"); + p.indexManagementDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + //
+ + private void indexManagementDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); + // Setting the preferred location to Cosmos DB Account region + // West US is just an example. User should set preferred location to the Cosmos DB region closest to the application + defaultPolicy.setPreferredLocations(Lists.newArrayList("West US")); + + // Create sync client + // + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(defaultPolicy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + // + + createDatabaseIfNotExists(); + + //Here is where index management is performed + createContainerIfNotExistsWithSpecifiedIndex(); + + // Setup family items to create + ArrayList familiesToCreate = new ArrayList<>(); + familiesToCreate.add(Families.getAndersenFamilyItem()); + familiesToCreate.add(Families.getWakefieldFamilyItem()); + familiesToCreate.add(Families.getJohnsonFamilyItem()); + familiesToCreate.add(Families.getSmithFamilyItem()); + + createFamilies(familiesToCreate); + + logger.info("Reading items."); + readItems(familiesToCreate); + + logger.info("Querying items."); + queryItems(); + } + + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists."); + + // Create database if not exists + // + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + // + + logger.info("Checking database " + database.getId() + " completed!\n"); + } + + private void createContainerIfNotExistsWithSpecifiedIndex() throws Exception { + logger.info("Create container " + containerName + " if not exists."); + + // Create container if not exists + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/lastName"); + + // + IndexingPolicy indexingPolicy = new IndexingPolicy(); + indexingPolicy.setIndexingMode(IndexingMode.CONSISTENT); //To turn indexing off set IndexingMode.NONE + + // Included paths + List includedPaths = new ArrayList<>(); + IncludedPath includedPath = new IncludedPath(); + includedPath.setPath("/*"); + includedPaths.add(includedPath); + indexingPolicy.setIncludedPaths(includedPaths); + + // Excluded paths + List excludedPaths = new ArrayList<>(); + ExcludedPath excludedPath = new ExcludedPath(); + excludedPath.setPath("/name/*"); + excludedPaths.add(excludedPath); + indexingPolicy.setExcludedPaths(excludedPaths); + + // Spatial indices - if you need them, here is how to set them up: + /* + List spatialIndexes = new ArrayList(); + List collectionOfSpatialTypes = new ArrayList(); + + SpatialSpec spec = new SpatialSpec(); + spec.setPath("/locations/*"); + collectionOfSpatialTypes.add(SpatialType.Point); + spec.setSpatialTypes(collectionOfSpatialTypes); + spatialIndexes.add(spec); + + indexingPolicy.setSpatialIndexes(spatialIndexes); + */ + + // Composite indices - if you need them, here is how to set them up: + /* + List> compositeIndexes = new ArrayList<>(); + List compositePaths = new ArrayList<>(); + + CompositePath nameCompositePath = new CompositePath(); + nameCompositePath.setPath("/name"); + nameCompositePath.setOrder(CompositePathSortOrder.ASCENDING); + + CompositePath ageCompositePath = new CompositePath(); + ageCompositePath.setPath("/age"); + ageCompositePath.setOrder(CompositePathSortOrder.DESCENDING); + + compositePaths.add(ageCompositePath); + 
compositePaths.add(nameCompositePath); + + compositeIndexes.add(compositePaths); + indexingPolicy.setCompositeIndexes(compositeIndexes); + */ + + containerProperties.setIndexingPolicy(indexingPolicy); + + // + + // Create container with 400 RU/s + container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); + + logger.info("Checking container " + container.getId() + " completed!\n"); + } + + private void createFamilies(List families) throws Exception { + double totalRequestCharge = 0; + for (Family family : families) { + + // + // Create item using container that we created using sync client + + // Use lastName as partitionKey for cosmos item + // Using appropriate partition key improves the performance of database operations + CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); + CosmosItemResponse item = container.createItem(family, new PartitionKey(family.getLastName()), cosmosItemRequestOptions); + // + + // Get request charge and other properties like latency, and diagnostics strings, etc. + logger.info(String.format("Created item with request charge of %.2f within" + + " duration %s", + item.getRequestCharge(), item.getRequestLatency())); + totalRequestCharge += item.getRequestCharge(); + } + logger.info(String.format("Created %d items with total request " + + "charge of %.2f", + families.size(), + totalRequestCharge)); + } + + private void readItems(ArrayList familiesToCreate) { + // Using partition key for point read scenarios. + // This will help fast look up of items because of partition key + familiesToCreate.forEach(family -> { + // + try { + CosmosItemResponse item = container.readItem(family.getId(), new PartitionKey(family.getLastName()), Family.class); + double requestCharge = item.getRequestCharge(); + Duration requestLatency = item.getRequestLatency(); + logger.info(String.format("Item successfully read with id %s with a charge of %.2f and within duration %s", + item.getResource().getId(), requestCharge, requestLatency)); + } catch (CosmosClientException e) { + e.printStackTrace(); + logger.error(String.format("Read Item failed with %s", e)); + } + // + }); + } + + private void queryItems() { + // + // Set some common query options + FeedOptions queryOptions = new FeedOptions(); + queryOptions.setMaxItemCount(10); + // Set populate query metrics to get metrics around query executions + queryOptions.setPopulateQueryMetrics(true); + + CosmosPagedIterable familiesPagedIterable = container.queryItems( + "SELECT * FROM Family WHERE Family.lastName IN ('Andersen', 'Wakefield', 'Johnson')", queryOptions, Family.class); + + familiesPagedIterable.iterableByPage().forEach(cosmosItemPropertiesFeedResponse -> { + logger.info("Got a page of query result with " + + cosmosItemPropertiesFeedResponse.getResults().size() + " items(s)" + + " and request charge of " + cosmosItemPropertiesFeedResponse.getRequestCharge()); + + logger.info("Item Ids " + cosmosItemPropertiesFeedResponse + .getResults() + .stream() + .map(Family::getId) + .collect(Collectors.toList())); + }); + // + } + + private void shutdown() { + try { + //Clean shutdown + logger.info("Deleting Cosmos DB resources"); + logger.info("-Deleting container..."); + if (container != null) + container.delete(); + logger.info("-Deleting database..."); + if (database != null) + database.delete(); + logger.info("-Closing the client..."); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done."); + } +} diff --git a/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstart.java b/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstart.java new file mode 100644 index 0000000..15cf790 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/queries/sync/QueriesQuickstart.java @@ -0,0 +1,446 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.queries.sync; + +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.Family; +import com.azure.cosmos.implementation.http.HttpResponse; +import com.azure.cosmos.models.AccessCondition; +import com.azure.cosmos.models.AccessConditionType; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosContainerRequestOptions; +import com.azure.cosmos.models.CosmosContainerResponse; +import com.azure.cosmos.models.CosmosDatabaseRequestOptions; +import com.azure.cosmos.models.CosmosDatabaseResponse; +import com.azure.cosmos.models.CosmosItemRequestOptions; +import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.FeedResponse; +import com.azure.cosmos.models.PartitionKey; +import com.azure.cosmos.models.SqlParameter; +import com.azure.cosmos.models.SqlParameterList; +import com.azure.cosmos.models.SqlQuerySpec; +import com.fasterxml.jackson.databind.JsonNode; +import io.netty.handler.codec.http.HttpResponseStatus; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Iterator; +import java.util.UUID; + +public class QueriesQuickstart { + + private CosmosClient client; + + private final String databaseName = "AzureSampleFamilyDB"; + private final String containerName = "FamilyContainer"; + private final String documentId = UUID.randomUUID().toString(); + private final String documentLastName = "Witherspoon"; + + private CosmosDatabase database; + private CosmosContainer container; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Sample to demonstrate Azure Cosmos DB queries via Java SQL API, including queries for: + * -All documents + * -Equality using == + * -Inequality using != and NOT + * -Using range operators like >, <, >=, <= + * -Using range operators against Strings + * -With ORDER BY + * -With aggregate functions + * -With subdocuments + * -With intra-document joins + * -With String, math and array operators + * -With parameterized SQL using SqlQuerySpec + * -With explicit paging + * -Query partitioned collections in parallel + * -With ORDER BY for partitioned collections + */ + public static void main(String[] args) { + QueriesQuickstart p = new QueriesQuickstart(); + + try { + logger.info("Starting SYNC main"); + p.queriesDemo(); + logger.info("Demo complete, please hold while resources are released"); + } catch (Exception e) { + e.printStackTrace(); + 
logger.error(String.format("Cosmos getStarted failed with %s", e)); + } finally { + logger.info("Closing the client"); + p.shutdown(); + } + } + + private void queriesDemo() throws Exception { + + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + // Create sync client + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + + createDatabaseIfNotExists(); + createContainerIfNotExists(); + + createDocument(); + + queryAllDocuments(); + queryWithPagingAndContinuationTokenAndPrintQueryCharge(new FeedOptions()); + queryEquality(); + queryInequality(); + queryRange(); + queryRangeAgainstStrings(); + queryOrderBy(); + queryWithAggregateFunctions(); + querySubdocuments(); + queryIntraDocumentJoin(); + queryStringMathAndArrayOperators(); + queryWithQuerySpec(); + parallelQueryWithPagingAndContinuationTokenAndPrintQueryCharge(); + + // deleteDocument() is called at shutdown() + + } + + private void executeQueryPrintSingleResult(String sql) { + logger.info("Execute query {}",sql); + + CosmosPagedIterable filteredFamilies = container.queryItems(sql, new FeedOptions(), Family.class); + + // Print + if (filteredFamilies.iterator().hasNext()) { + Family family = filteredFamilies.iterator().next(); + logger.info("First query result: Family with (/id, partition key) = (%s,%s)",family.getId(),family.getLastName()); + } + + logger.info("Done."); + } + + private void executeQueryWithQuerySpecPrintSingleResult(SqlQuerySpec querySpec) { + logger.info("Execute query {}",querySpec.getQueryText()); + + CosmosPagedIterable filteredFamilies = container.queryItems(querySpec, new FeedOptions(), Family.class); + + // Print + if (filteredFamilies.iterator().hasNext()) { + Family family = filteredFamilies.iterator().next(); + logger.info("First query result: Family with (/id, partition key) = (%s,%s)",family.getId(),family.getLastName()); + } + + logger.info("Done."); + } + + // Database Create + private void createDatabaseIfNotExists() throws Exception { + logger.info("Create database " + databaseName + " if not exists..."); + + // Create database if not exists + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + logger.info("Done."); + } + + // Container create + private void createContainerIfNotExists() throws Exception { + logger.info("Create container " + containerName + " if not exists."); + + // Create container if not exists + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/lastName"); + + // Create container with 200 RU/s + container = database.createContainerIfNotExists(containerProperties, 200).getContainer(); + + logger.info("Done."); + } + + private void createDocument() throws Exception { + logger.info("Create document " + documentId); + + // Define a document as a POJO (internally this + // is converted to JSON via custom serialization) + Family family = new Family(); + family.setLastName(documentLastName); + family.setId(documentId); + + // Insert this item as a document + // Explicitly specifying the /pk value improves performance. 
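+        // Note: this quickstart seeds only this single Family document, with just id and lastName set,
+        // so several of the range, ORDER BY and JOIN queries later in the sample are expected to
+        // return no results; they are included to illustrate query syntax rather than to produce output.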
+ container.createItem(family,new PartitionKey(family.getLastName()),new CosmosItemRequestOptions()); + + logger.info("Done."); + } + + private void queryAllDocuments() throws Exception { + logger.info("Query all documents."); + + executeQueryPrintSingleResult("SELECT * FROM c"); + } + + private void queryWithPagingAndContinuationTokenAndPrintQueryCharge(FeedOptions options) throws Exception { + logger.info("Query with paging and continuation token; print the total RU charge of the query"); + + String query = "SELECT * FROM Families"; + + int pageSize = 100; //No of docs per page + int currentPageNumber = 1; + int documentNumber = 0; + String continuationToken = null; + + double requestCharge = 0.0; + + // First iteration (continuationToken == null): Receive a batch of query response pages + // Subsequent iterations (continuationToken != null): Receive subsequent batch of query response pages, with continuationToken indicating where the previous iteration left off + do { + logger.info("Receiving a set of query response pages."); + logger.info("Continuation Token: " + continuationToken + "\n"); + + FeedOptions queryOptions = new FeedOptions(); + + // note that setMaxItemCount sets the number of items to return in a single page result + queryOptions.setMaxItemCount(pageSize); + queryOptions.setRequestContinuation(continuationToken); + + Iterable> feedResponseIterator = + container.queryItems(query, queryOptions, Family.class).iterableByPage(); + + for (FeedResponse page : feedResponseIterator) { + logger.info(String.format("Current page number: %d", currentPageNumber)); + // Access all of the documents in this result page + for (Family docProps : page.getResults()) { + documentNumber++; + } + + // Accumulate the request charge of this page + requestCharge += page.getRequestCharge(); + + // Page count so far + logger.info(String.format("Total documents received so far: %d", documentNumber)); + + // Request charge so far + logger.info(String.format("Total request charge so far: %f\n", requestCharge)); + + // Along with page results, get a continuation token + // which enables the client to "pick up where it left off" + // in accessing query response pages. 
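+                // getContinuationToken() returns null once the last page has been consumed,
+                // which is what terminates the do/while loop below.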
+ continuationToken = page.getContinuationToken(); + + currentPageNumber++; + } + + } while (continuationToken != null); + + logger.info(String.format("Total request charge: %f\n", requestCharge)); + } + + private void parallelQueryWithPagingAndContinuationTokenAndPrintQueryCharge() throws Exception { + logger.info("Parallel implementation of:"); + + FeedOptions options = new FeedOptions(); + + // 0 maximum parallel tasks, effectively serial execution + options.setMaxDegreeOfParallelism(0); + options.setMaxBufferedItemCount(100); + queryWithPagingAndContinuationTokenAndPrintQueryCharge(options); + + // 1 maximum parallel tasks, 1 dedicated asynchronous task to continuously make REST calls + options.setMaxDegreeOfParallelism(1); + options.setMaxBufferedItemCount(100); + queryWithPagingAndContinuationTokenAndPrintQueryCharge(options); + + // 10 maximum parallel tasks, a maximum of 10 dedicated asynchronous tasks to continuously make REST calls + options.setMaxDegreeOfParallelism(10); + options.setMaxBufferedItemCount(100); + queryWithPagingAndContinuationTokenAndPrintQueryCharge(options); + + logger.info("Done with parallel queries."); + } + + private void queryEquality() throws Exception { + logger.info("Query for equality using =="); + + executeQueryPrintSingleResult("SELECT * FROM c WHERE c.id == '" + documentId + "'"); + } + + private void queryInequality() throws Exception { + logger.info("Query for inequality"); + + executeQueryPrintSingleResult("SELECT * FROM c WHERE c.id != '" + documentId + "'"); + executeQueryPrintSingleResult("SELECT * FROM c WHERE c.id <> '" + documentId + "'"); + + // Combine equality and inequality + executeQueryPrintSingleResult("SELECT * FROM c WHERE c.lastName == '" + documentLastName + "' && c.id != '" + documentId + "'"); + } + + private void queryRange() throws Exception { + logger.info("Numerical range query"); + + // Numerical range query + executeQueryPrintSingleResult("SELECT * FROM Families f WHERE f.Children[0].Grade > 5"); + } + + private void queryRangeAgainstStrings() throws Exception { + logger.info("String range query"); + + // String range query + executeQueryPrintSingleResult("SELECT * FROM Families f WHERE f.Address.State > 'NY'"); + } + + private void queryOrderBy() throws Exception { + logger.info("ORDER BY queries"); + + // Numerical ORDER BY + executeQueryPrintSingleResult("SELECT * FROM Families f WHERE f.LastName = 'Andersen' ORDER BY f.Children[0].Grade"); + } + + private void queryDistinct() throws Exception { + logger.info("DISTINCT queries"); + + // DISTINCT query + executeQueryPrintSingleResult("SELECT DISTINCT c.lastName from c"); + } + + private void queryWithAggregateFunctions() throws Exception { + logger.info("Aggregate function queries"); + + // Basic query with aggregate functions + executeQueryPrintSingleResult("SELECT VALUE COUNT(f) FROM Families f WHERE f.LastName = 'Andersen'"); + + // Query with aggregate functions within documents + executeQueryPrintSingleResult("SELECT VALUE COUNT(child) FROM child IN f.Children"); + } + + private void querySubdocuments() throws Exception { + // Cosmos DB supports the selection of sub-documents on the server, there + // is no need to send down the full family record if all you want to display + // is a single child + + logger.info("Subdocument query"); + + executeQueryPrintSingleResult("SELECT VALUE c FROM c IN f.Children"); + } + + private void queryIntraDocumentJoin() throws Exception { + // Cosmos DB supports the notion of an Intra-document Join, or a self-join + // which will 
effectively flatten the hierarchy of a document, just like doing + // a self JOIN on a SQL table + + logger.info("Intra-document joins"); + + // Single join + executeQueryPrintSingleResult("SELECT f.id FROM Families f JOIN c IN f.Children"); + + // Two joins + executeQueryPrintSingleResult("SELECT f.id as family, c.FirstName AS child, p.GivenName AS pet " + + "FROM Families f " + + "JOIN c IN f.Children " + + "join p IN c.Pets"); + + // Two joins and a filter + executeQueryPrintSingleResult("SELECT f.id as family, c.FirstName AS child, p.GivenName AS pet " + + "FROM Families f " + + "JOIN c IN f.Children " + + "join p IN c.Pets " + + "WHERE p.GivenName = 'Fluffy'"); + } + + private void queryStringMathAndArrayOperators() throws Exception { + logger.info("Queries with string, math and array operators"); + + // String STARTSWITH operator + executeQueryPrintSingleResult("SELECT * FROM family WHERE STARTSWITH(family.LastName, 'An')"); + + // Round down numbers with FLOOR + executeQueryPrintSingleResult("SELECT VALUE FLOOR(family.Children[0].Grade) FROM family"); + + // Get number of children using array length + executeQueryPrintSingleResult("SELECT VALUE ARRAY_LENGTH(family.Children) FROM family"); + } + + private void queryWithQuerySpec() throws Exception { + logger.info("Query with SqlQuerySpec"); + + FeedOptions options = new FeedOptions(); + options.setPartitionKey(new PartitionKey("Witherspoon")); + + // Simple query with a single property equality comparison + // in SQL with SQL parameterization instead of inlining the + // parameter values in the query string + + SqlParameterList paramList = new SqlParameterList(); + paramList.add(new SqlParameter("@id", "AndersenFamily")); + SqlQuerySpec querySpec = new SqlQuerySpec( + "SELECT * FROM Families f WHERE (f.id = @id)", + paramList); + + executeQueryWithQuerySpecPrintSingleResult(querySpec); + + // Query using two properties within each document. WHERE Id == "" AND Address.City == "" + // notice here how we are doing an equality comparison on the string value of City + + paramList = new SqlParameterList(); + paramList.add(new SqlParameter("@id", "AndersenFamily")); + paramList.add(new SqlParameter("@city", "Seattle")); + querySpec = new SqlQuerySpec( + "SELECT * FROM Families f WHERE f.id = @id AND f.Address.City = @city", + paramList); + + executeQueryWithQuerySpecPrintSingleResult(querySpec); + } + + // Document delete + private void deleteADocument() throws Exception { + logger.info("Delete document " + documentId + " by ID."); + + // Delete document + container.deleteItem(documentId, new PartitionKey(documentLastName), new CosmosItemRequestOptions()); + + logger.info("Done."); + } + + // Database delete + private void deleteADatabase() throws Exception { + logger.info("Last step: delete database " + databaseName + " by ID."); + + // Delete database + CosmosDatabaseResponse dbResp = client.getDatabase(databaseName).delete(new CosmosDatabaseRequestOptions()); + logger.info("Status code for database delete: {}",dbResp.getStatusCode()); + + logger.info("Done."); + } + + // Cleanup before close + private void shutdown() { + try { + //Clean shutdown + deleteADocument(); + deleteADatabase(); + } catch (Exception err) { + logger.error("Deleting Cosmos DB resources failed, will still attempt to close the client. 
See stack trace below."); + err.printStackTrace(); + } + client.close(); + logger.info("Done with sample."); + } + +} diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java new file mode 100644 index 0000000..8b637ad --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/async/SampleRequestThroughputAsync.java @@ -0,0 +1,167 @@ +package com.azure.cosmos.examples.requestthroughput.async; + +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.Profile; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.fasterxml.jackson.databind.JsonNode; +import com.google.common.util.concurrent.AtomicDouble; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + + +/* + * Async Request Throughput Sample + * + * Please note that perf testing incurs costs for provisioning container throughput and storage. + * + * This throughput profiling sample issues high-throughput document insert requests to an Azure Cosmos DB container. + * Run this code in a geographically colocated VM for best performance. + * + * Example configuration + * -Provision 100000 RU/s container throughput + * -Generate 4M documents + * -Result: ~60K RU/s actual throughput + */ + +public class SampleRequestThroughputAsync { + + protected static Logger logger = LoggerFactory.getLogger(SampleRequestThroughputAsync.class.getSimpleName()); + + public static void main(String[] args) { + try { + requestThroughputDemo(); + } catch(Exception err) { + logger.error("Failed running demo: ", err); + } + } + + private static CosmosAsyncClient client; + private static CosmosAsyncDatabase database; + private static CosmosAsyncContainer container; + private static AtomicBoolean resources_created = new AtomicBoolean(false); + private static AtomicInteger number_docs_inserted = new AtomicInteger(0); + private static AtomicBoolean resources_deleted = new AtomicBoolean(false); + private static AtomicDouble total_charge = new AtomicDouble(0.0); + + public static void requestThroughputDemo() { + ConnectionPolicy my_connection_policy = ConnectionPolicy.getDefaultPolicy(); + + // Create Async client. + // Building an async client is still a sync operation. + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(ConnectionPolicy.getDefaultPolicy()) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildAsyncClient(); + + // Describe the logic of database and container creation using Reactor... 
+ Mono databaseContainerIfNotExist = client.createDatabaseIfNotExists("ContosoInventoryDB").flatMap(databaseResponse -> { + database = databaseResponse.getDatabase(); + logger.info("\n\n\n\nCreated database ContosoInventoryDB.\n\n\n\n"); + CosmosContainerProperties containerProperties = new CosmosContainerProperties("ContosoInventoryContainer", "/id"); + return database.createContainerIfNotExists(containerProperties, 400); + }).flatMap(containerResponse -> { + container = containerResponse.getContainer(); + logger.info("\n\n\n\nCreated container ContosoInventoryContainer.\n\n\n\n"); + return Mono.empty(); + }); + + // ...it doesn't execute until you subscribe(). + // The async call returns immediately... + logger.info("Creating database and container asynchronously..."); + databaseContainerIfNotExist.subscribe(voidItem -> {}, err -> {}, + () -> { + logger.info("Finished creating resources.\n\n"); + resources_created.set(true); + }); + + // ...so we can do other things until async response arrives! + logger.info("Doing other things until async resource creation completes......"); + while (!resources_created.get()) Profile.doOtherThings(); + + // Container is created. Generate many docs to insert. + int number_of_docs = 50000; + logger.info("Generating {} documents...", number_of_docs); + ArrayList docs = Profile.generateDocs(number_of_docs); + + // Insert many docs into container... + logger.info("Inserting {} documents...", number_of_docs); + + Profile.tic(); + int last_docs_inserted=0; + double last_total_charge=0.0; + + Flux.fromIterable(docs).flatMap(doc -> container.createItem(doc)) + // ^Publisher: upon subscription, createItem inserts a doc & + // publishes request response to the next operation... + .flatMap(itemResponse -> { + // ...Streaming operation: count each doc & check success... + + if (itemResponse.getStatusCode() == 201) { + number_docs_inserted.getAndIncrement(); + total_charge.getAndAdd(itemResponse.getRequestCharge()); + } + else + logger.warn("WARNING insert status code {} != 201", itemResponse.getStatusCode()); + return Mono.empty(); + }).subscribe(); // ...Subscribing to the publisher triggers stream execution. + + // Do other things until async response arrives + logger.info("Doing other things until async doc inserts complete..."); + //while (number_docs_inserted.get() < number_of_docs) Profile.doOtherThings(); + double toc_time=0.0; + int current_docs_inserted=0; + double current_total_charge=0.0, rps=0.0, rups=0.0; + while (number_docs_inserted.get() < number_of_docs) { + toc_time=Profile.toc_ms(); + current_docs_inserted=number_docs_inserted.get(); + current_total_charge=total_charge.get(); + if (toc_time >= 1000.0) { + Profile.tic(); + rps=1000.0*((double)(current_docs_inserted-last_docs_inserted))/toc_time; + rups=1000.0*(current_total_charge-last_total_charge)/toc_time; + logger.info(String.format("\n\n\n\n" + + "Async Throughput Profiler Result, Last 1000ms:" + "\n\n" + + "%8s %8s", StringUtils.center("Req/sec",8),StringUtils.center("RU/s",8)) + "\n" + + "----------------------------------" + "\n" + + String.format("%8.1f %8.1f",rps,rups) + "\n\n\n\n"); + last_docs_inserted=current_docs_inserted; + last_total_charge=current_total_charge; + } + } + + // Inserts are complete. Cleanup (asynchronously!) 
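The profiler block above reduces to a simple rate calculation over the most recent sampling window (roughly 1000 ms): divide the change in inserted-document count and in accumulated request charge by the elapsed time. A minimal standalone sketch of that arithmetic, with illustrative names rather than the sample's Profile helper:

```
// Illustrative rate math only; names are not from the sample.
public final class ThroughputWindowSketch {

    // Documents inserted per second over a window of elapsedMs milliseconds.
    static double requestsPerSecond(int docsBefore, int docsNow, double elapsedMs) {
        return 1000.0 * (docsNow - docsBefore) / elapsedMs;
    }

    // Request units (RU) consumed per second over the same window.
    static double requestUnitsPerSecond(double chargeBefore, double chargeNow, double elapsedMs) {
        return 1000.0 * (chargeNow - chargeBefore) / elapsedMs;
    }

    public static void main(String[] args) {
        // Example window: 1,200 inserts and 6,000 RU consumed in the last 1,000 ms.
        System.out.printf("%.1f req/s, %.1f RU/s%n",
                requestsPerSecond(0, 1200, 1000.0),
                requestUnitsPerSecond(0.0, 6000.0, 1000.0));
    }
}
```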
+ logger.info("Deleting resources."); + container.delete() + .flatMap(containerResponse -> database.delete()) + .subscribe(dbItem -> {}, err -> {}, + () -> { + logger.info("Finished deleting resources."); + resources_deleted.set(true); + }); + + // Do other things until async response arrives + logger.info("Do other things until async resource delete completes..."); + while (!resources_deleted.get()) Profile.doOtherThings(); + + // Close client. This is always sync. + logger.info("Closing client..."); + client.close(); + logger.info("Done with demo."); + + } +} diff --git a/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java new file mode 100644 index 0000000..14b23c1 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/requestthroughput/sync/SampleRequestThroughput.java @@ -0,0 +1,144 @@ +package com.azure.cosmos.examples.requestthroughput.sync; + + +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.ThrottlingRetryOptions; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.Profile; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.IndexingMode; +import com.azure.cosmos.models.IndexingPolicy; +import com.fasterxml.jackson.databind.JsonNode; +import com.google.common.util.concurrent.AtomicDouble; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.concurrent.atomic.AtomicInteger; + +/* + * Sync Request Throughput Sample + * + * Please note that perf testing incurs costs for provisioning container throughput and storage. + * + * This throughput profiling sample issues high-throughput document insert requests to an Azure Cosmos DB container. + * Run this code in a geographically colocated VM for best performance. 
+ * + */ + +public class SampleRequestThroughput { + + protected static Logger logger = LoggerFactory.getLogger(SampleRequestThroughput.class.getSimpleName()); + + public static void main(String[] args) { + try { + requestThroughputDemo(); + } catch(Exception err) { + logger.error("Failed running demo: ", err); + } + } + + private static CosmosClient client; + private static CosmosDatabase database; + private static CosmosContainer container; + private static AtomicInteger number_docs_inserted = new AtomicInteger(0); + private static AtomicDouble total_charge = new AtomicDouble(0.0); + private static int last_docs_inserted=0; + private static double last_total_charge=0.0; + private static double toc_time=0.0; + private static int current_docs_inserted=0; + private static double current_total_charge=0.0, rps=0.0, rups=0.0; + + public static void requestThroughputDemo() { + ConnectionPolicy my_connection_policy = ConnectionPolicy.getDefaultPolicy(); + ThrottlingRetryOptions retry_options = new ThrottlingRetryOptions(); + //retry_options.setMaxRetryWaitTime(Duration.ZERO); + my_connection_policy.setThrottlingRetryOptions(retry_options); + + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(my_connection_policy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + // This code synchronously sends a request to create a database. + // While the client waits for a response, this thread is blocked from + // performing other tasks. + database = client.createDatabaseIfNotExists("ContosoInventoryDB").getDatabase(); + logger.info("\n\n\n\nCreated database ContosoInventoryDB.\n\n\n\n"); + //IndexingPolicy indexingPolicy = new IndexingPolicy(); + //indexingPolicy.setIndexingMode(IndexingMode.NONE); + //indexingPolicy.setAutomatic(false); + CosmosContainerProperties containerProperties = new CosmosContainerProperties("ContosoInventoryContainer", "/id"); + //containerProperties.setIndexingPolicy(indexingPolicy); + container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); + logger.info("\n\n\n\nCreated container ContosoInventoryContainer.\n\n\n\n"); + // Resources are ready. + // + // Create many docs to insert into the container + int number_of_docs = 50000; + logger.info("Generating {} documents...", number_of_docs); + ArrayList docs = Profile.generateDocs(number_of_docs); + logger.info("Inserting {} documents...", number_of_docs); + + Profile.tic(); + + //Profiler code - it's good for this part to be async + Flux.interval(Duration.ofMillis(10)).map(tick -> { + //logger.info("In profiler."); + toc_time=Profile.toc_ms(); + current_docs_inserted=number_docs_inserted.get(); + current_total_charge=total_charge.get(); + if (toc_time >= 1000.0) { + Profile.tic(); + rps=1000.0*((double)(current_docs_inserted-last_docs_inserted))/toc_time; + rups=1000.0*(current_total_charge-last_total_charge)/toc_time; + logger.info(String.format("\n\n\n\n" + + "Sync Throughput Profiler Result, Last 1000ms:" + "\n\n" + + "%8s %8s", StringUtils.center("Req/sec",8),StringUtils.center("RU/s",8)) + "\n" + + "----------------------------------" + "\n" + + String.format("%8.1f %8.1f",rps,rups) + "\n\n\n\n"); + last_docs_inserted=current_docs_inserted; + last_total_charge=current_total_charge; + } + return tick; + }).subscribe(); + + // Insert many docs synchronously. + // The client blocks waiting for a response to each insert request, + // which limits throughput. 
+ // While the client is waiting for a response, the thread is blocked from other tasks + for(JsonNode doc : docs) { + CosmosItemResponse itemResponse = container.createItem(doc); + if (itemResponse.getStatusCode() == 201) { + number_docs_inserted.getAndIncrement(); + total_charge.getAndAdd(itemResponse.getRequestCharge()); + } + else + logger.warn("WARNING insert status code {} != 201", itemResponse.getStatusCode()); + } + + // Clean up + logger.info("Deleting resources."); + container.delete(); + database.delete(); + logger.info("Finished deleting resources."); + + logger.info("Closing client..."); + client.close(); + + logger.info("Done with demo."); + + } +} diff --git a/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java new file mode 100644 index 0000000..21ca683 --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/async/SampleStoredProcedureAsync.java @@ -0,0 +1,222 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.storedprocedure.async; + +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosAsyncClient; +import com.azure.cosmos.CosmosAsyncContainer; +import com.azure.cosmos.CosmosAsyncDatabase; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosClientException; +import com.azure.cosmos.CosmosPagedFlux; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.CustomPOJO; +import com.azure.cosmos.models.CosmosAsyncItemResponse; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosStoredProcedureProperties; +import com.azure.cosmos.models.CosmosStoredProcedureRequestOptions; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.PartitionKey; +import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.Mono; + +import java.util.concurrent.CountDownLatch; + +public class SampleStoredProcedureAsync { + + private CosmosAsyncClient client; + + private final String databaseName = "SprocTestDB"; + private final String containerName = "SprocTestContainer"; + + private CosmosAsyncDatabase database; + private CosmosAsyncContainer container; + + private String sprocId; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Stored Procedure Example + *

+     * This sample code demonstrates the creation, execution, and effects of stored procedures
+     * using the Java SDK. A stored procedure is created which inserts a JSON object into
+     * a Cosmos DB container. The sample executes the stored procedure and then performs
+     * a point-read to confirm that the stored procedure had the intended effect.
+     */
+    //

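The point-read in sprocDemo() deserializes the document created by the stored procedure into CustomPOJO, a helper class from com.azure.cosmos.examples.common that is not shown in this diff. For the read of {"id": "test_doc"} to bind, it presumably needs at least an id property; a hypothetical minimal shape:

```
// Hypothetical sketch only; the repo's real CustomPOJO may carry additional fields.
public class CustomPOJO {
    private String id;

    public CustomPOJO() { }               // no-arg constructor for Jackson deserialization

    public String getId() { return id; }
    public void setId(String id) { this.id = id; }
}
```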
+    public static void main(String[] args) {
+        SampleStoredProcedureAsync p = new SampleStoredProcedureAsync();
+
+        try {
+            p.sprocDemo();
+            logger.info("Demo complete, please hold while resources are released");
+            p.shutdown();
+            logger.info("Done.\n");
+        } catch (Exception e) {
+            e.printStackTrace();
+            logger.error(String.format("Stored procedure sample failed with %s", e));
+            p.close();
+        } finally {
+        }
+    }
+
+    //
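readAllSprocs() further down keeps the calling thread alive with a CountDownLatch until the reactive listing terminates. The same pattern, reduced to a self-contained sketch over a plain Reactor Flux with no Cosmos types involved:

```
import java.util.concurrent.CountDownLatch;
import reactor.core.publisher.Flux;

public class LatchOnFluxSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch completionLatch = new CountDownLatch(1);

        Flux.just("sproc-a", "sproc-b", "sproc-c")
            .subscribe(
                id -> System.out.println("Stored Procedure: " + id),            // onNext
                err -> { err.printStackTrace(); completionLatch.countDown(); }, // onError
                completionLatch::countDown);                                    // onComplete

        completionLatch.await();  // block until the stream completes or errors
    }
}
```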
+ + private void sprocDemo() throws Exception { + //Setup client, DB, and the container for which we will create stored procedures + //The container partition key will be id + setUp(); + + //Create stored procedure and list all stored procedures that have been created. + createStoredProcedure(); + readAllSprocs(); + + //Execute the stored procedure, which we expect will create an item with id test_doc + executeStoredProcedure(); + + //Perform a point-read to confirm that the item with id test_doc exists + logger.info("Checking that a document was created by the stored procedure..."); + CosmosAsyncItemResponse test_resp = + container.readItem("test_doc", new PartitionKey("test_doc"), CustomPOJO.class).block(); + logger.info(String.format( + "Status return value of point-read for document created by stored procedure (200 indicates success): %d", test_resp.getStatusCode())); + } + + public void setUp() throws Exception { + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); + // Setting the preferred location to Cosmos DB Account region + // West US is just an example. User should set preferred location to the Cosmos DB region closest to the application + defaultPolicy.setPreferredLocations(Lists.newArrayList("West US")); + + // Create sync client + // + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(defaultPolicy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildAsyncClient(); + + logger.info("Create database " + databaseName + " with container " + containerName + " if either does not already exist.\n"); + + client.createDatabaseIfNotExists(databaseName).flatMap(databaseResponse -> { + database = databaseResponse.getDatabase(); + return Mono.empty(); + }).block(); + + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/id"); + database.createContainerIfNotExists(containerProperties, 400).flatMap(containerResponse -> { + container = containerResponse.getContainer(); + return Mono.empty(); + }).block(); + } + + public void shutdown() throws Exception { + //Safe clean & close + deleteStoredProcedure(); + } + + public void createStoredProcedure() throws Exception { + logger.info("Creating stored procedure...\n"); + + sprocId = "createMyDocument"; + String sprocBody = "function createMyDocument() {\n" + + "var documentToCreate = {\"id\":\"test_doc\"}\n" + + "var context = getContext();\n" + + "var collection = context.getCollection();\n" + + "var accepted = collection.createDocument(collection.getSelfLink(), documentToCreate,\n" + + " function (err, documentCreated) {\n" + + "if (err) throw new Error('Error' + err.message);\n" + + "context.getResponse().setBody(documentCreated.id)\n" + + "});\n" + + "if (!accepted) return;\n" + + "}"; + CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId, sprocBody); + container.getScripts() + .createStoredProcedure(storedProcedureDef, + new CosmosStoredProcedureRequestOptions()).block(); + } + + private void readAllSprocs() throws Exception { + + FeedOptions feedOptions = new FeedOptions(); + CosmosPagedFlux fluxResponse = + container.getScripts().readAllStoredProcedures(feedOptions); + + final CountDownLatch completionLatch = new CountDownLatch(1); + + + fluxResponse.flatMap(storedProcedureProperties -> { + logger.info(String.format("Stored Procedure: %s\n", 
storedProcedureProperties.getId())); + return Mono.empty(); + }).subscribe( + s -> { + }, + err -> { + if (err instanceof CosmosClientException) { + //Client-specific errors + CosmosClientException cerr = (CosmosClientException) err; + cerr.printStackTrace(); + logger.info(String.format("Read Item failed with %s\n", cerr)); + } else { + //General errors + err.printStackTrace(); + } + + completionLatch.countDown(); + }, + () -> { + completionLatch.countDown(); + } + ); + + completionLatch.await(); + } + + public void executeStoredProcedure() throws Exception { + logger.info(String.format("Executing stored procedure %s...\n\n", sprocId)); + + CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions(); + options.setPartitionKey(new PartitionKey("test_doc")); + + container.getScripts() + .getStoredProcedure(sprocId) + .execute(null, options) + .flatMap(executeResponse -> { + logger.info(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", + sprocId, + executeResponse.getResponseAsString(), + executeResponse.getStatusCode(), + executeResponse.getRequestCharge())); + return Mono.empty(); + }).block(); + } + + public void deleteStoredProcedure() throws Exception { + logger.info("-Deleting stored procedure...\n"); + container.getScripts() + .getStoredProcedure(sprocId) + .delete().block(); + logger.info("-Deleting database...\n"); + database.delete().block(); + logger.info("-Closing client instance...\n"); + client.close(); + } +} diff --git a/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java new file mode 100644 index 0000000..2061c5a --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/storedprocedure/sync/SampleStoredProcedure.java @@ -0,0 +1,187 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.examples.storedprocedure.sync; + +import com.azure.cosmos.ConnectionPolicy; +import com.azure.cosmos.ConsistencyLevel; +import com.azure.cosmos.CosmosClient; +import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.CosmosContainer; +import com.azure.cosmos.CosmosDatabase; +import com.azure.cosmos.CosmosPagedIterable; +import com.azure.cosmos.examples.changefeed.SampleChangeFeedProcessor; +import com.azure.cosmos.examples.common.AccountSettings; +import com.azure.cosmos.examples.common.CustomPOJO; +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosItemResponse; +import com.azure.cosmos.models.CosmosStoredProcedureProperties; +import com.azure.cosmos.models.CosmosStoredProcedureRequestOptions; +import com.azure.cosmos.models.CosmosStoredProcedureResponse; +import com.azure.cosmos.models.FeedOptions; +import com.azure.cosmos.models.PartitionKey; +import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Iterator; + +public class SampleStoredProcedure { + + private CosmosClient client; + + private final String databaseName = "SprocTestDB"; + private final String containerName = "SprocTestContainer"; + + private CosmosDatabase database; + private CosmosContainer container; + + private String sprocId; + + protected static Logger logger = LoggerFactory.getLogger(SampleChangeFeedProcessor.class.getSimpleName()); + + public void close() { + client.close(); + } + + /** + * Stored Procedure Example + *

+     * This sample code demonstrates the creation, execution, and effects of stored procedures
+     * using the Java SDK. A stored procedure is created which inserts a JSON object into
+     * a Cosmos DB container. The sample executes the stored procedure and then performs
+     * a point-read to confirm that the stored procedure had the intended effect.
+     */
+    //

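executeStoredProcedure() below passes null as the first argument to execute(), so the createMyDocument script runs without parameters. If the script were rewritten to take arguments (for example, the id of the document to create), they would presumably be supplied through that first parameter. A hedged sketch against the types this sample already imports; the parameterized call and helper class are illustrative, not part of the repo:

```
import com.azure.cosmos.CosmosContainer;
import com.azure.cosmos.models.CosmosStoredProcedureRequestOptions;
import com.azure.cosmos.models.CosmosStoredProcedureResponse;
import com.azure.cosmos.models.PartitionKey;
import java.util.ArrayList;
import java.util.List;

class SprocArgumentsSketch {
    // Assumes the execute(procedureParams, options) overload that the sample invokes with null.
    static String executeWithArgs(CosmosContainer container) {
        CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions();
        options.setPartitionKey(new PartitionKey("test_doc"));

        List<Object> args = new ArrayList<>();
        args.add("test_doc");  // visible to the server-side script as its first parameter

        CosmosStoredProcedureResponse response = container.getScripts()
                .getStoredProcedure("createMyDocument")
                .execute(args, options);
        return response.responseAsString();
    }
}
```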
+    public static void main(String[] args) {
+        SampleStoredProcedure p = new SampleStoredProcedure();
+
+        try {
+            p.sprocDemo();
+            logger.info("Demo complete, please hold while resources are released");
+            p.shutdown();
+            logger.info("Done.\n");
+        } catch (Exception e) {
+            e.printStackTrace();
+            logger.error(String.format("Stored procedure sample failed with %s", e));
+            p.close();
+        } finally {
+        }
+    }
+
+    //
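sprocDemo() below confirms the stored procedure's effect with a point-read of test_doc. If that document were missing, the read would surface a not-found error rather than a 200. A hedged sketch of defensive handling, mirroring the instanceof check the async sample applies in readAllSprocs(); treating the sync read failure as a CosmosClientException with a 404 status is an assumption, and the helper is illustrative rather than part of the sample:

```
// Illustrative helper; relies on the enclosing class's container, logger, and CustomPOJO,
// and assumes read failures carry a CosmosClientException with a status code.
private void verifySprocEffect() throws Exception {
    try {
        CosmosItemResponse<CustomPOJO> resp =
                container.readItem("test_doc", new PartitionKey("test_doc"), CustomPOJO.class);
        logger.info("Point-read status: {}", resp.getStatusCode());
    } catch (Exception e) {
        if (e instanceof CosmosClientException && ((CosmosClientException) e).getStatusCode() == 404) {
            logger.error("test_doc was not created by the stored procedure.");
        } else {
            throw e;
        }
    }
}
```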
+ + private void sprocDemo() throws Exception { + //Setup client, DB, and the container for which we will create stored procedures + //The container partition key will be id + setUp(); + + //Create stored procedure and list all stored procedures that have been created. + createStoredProcedure(); + readAllSprocs(); + + //Execute the stored procedure, which we expect will create an item with id test_doc + executeStoredProcedure(); + + //Perform a point-read to confirm that the item with id test_doc exists + logger.info("Checking that a document was created by the stored procedure..."); + CosmosItemResponse test_resp = container.readItem("test_doc", new PartitionKey("test_doc"), CustomPOJO.class); + logger.info(String.format( + "Result of point-read for document created by stored procedure (200 indicates success): %d", test_resp.getStatusCode())); + } + + public void setUp() throws Exception { + logger.info("Using Azure Cosmos DB endpoint: " + AccountSettings.HOST); + + ConnectionPolicy defaultPolicy = ConnectionPolicy.getDefaultPolicy(); + // Setting the preferred location to Cosmos DB Account region + // West US is just an example. User should set preferred location to the Cosmos DB region closest to the application + defaultPolicy.setPreferredLocations(Lists.newArrayList("West US")); + + // Create sync client + // + client = new CosmosClientBuilder() + .setEndpoint(AccountSettings.HOST) + .setKey(AccountSettings.MASTER_KEY) + .setConnectionPolicy(defaultPolicy) + .setConsistencyLevel(ConsistencyLevel.EVENTUAL) + .buildClient(); + + logger.info("Create database " + databaseName + " with container " + containerName + " if either does not already exist.\n"); + + database = client.createDatabaseIfNotExists(databaseName).getDatabase(); + + CosmosContainerProperties containerProperties = + new CosmosContainerProperties(containerName, "/id"); + container = database.createContainerIfNotExists(containerProperties, 400).getContainer(); + } + + public void shutdown() throws Exception { + //Safe clean & close + deleteStoredProcedure(); + } + + public void createStoredProcedure() throws Exception { + logger.info("Creating stored procedure..."); + + sprocId = "createMyDocument"; + String sprocBody = "function createMyDocument() {\n" + + "var documentToCreate = {\"id\":\"test_doc\"}\n" + + "var context = getContext();\n" + + "var collection = context.getCollection();\n" + + "var accepted = collection.createDocument(collection.getSelfLink(), documentToCreate,\n" + + " function (err, documentCreated) {\n" + + "if (err) throw new Error('Error' + err.message);\n" + + "context.getResponse().setBody(documentCreated.id)\n" + + "});\n" + + "if (!accepted) return;\n" + + "}"; + CosmosStoredProcedureProperties storedProcedureDef = new CosmosStoredProcedureProperties(sprocId, sprocBody); + container.getScripts() + .createStoredProcedure(storedProcedureDef, + new CosmosStoredProcedureRequestOptions()); + } + + private void readAllSprocs() throws Exception { + logger.info("Listing all stored procedures associated with container " + containerName + "\n"); + + FeedOptions feedOptions = new FeedOptions(); + CosmosPagedIterable feedResponseIterable = + container.getScripts().readAllStoredProcedures(feedOptions); + + Iterator feedResponseIterator = feedResponseIterable.iterator(); + + while (feedResponseIterator.hasNext()) { + CosmosStoredProcedureProperties storedProcedureProperties = feedResponseIterator.next(); + logger.info(String.format("Stored Procedure: %s", storedProcedureProperties)); + } + } + + public void 
executeStoredProcedure() throws Exception { + logger.info(String.format("Executing stored procedure %s...\n\n", sprocId)); + + CosmosStoredProcedureRequestOptions options = new CosmosStoredProcedureRequestOptions(); + options.setPartitionKey(new PartitionKey("test_doc")); + CosmosStoredProcedureResponse executeResponse = container.getScripts() + .getStoredProcedure(sprocId) + .execute(null, options); + + logger.info(String.format("Stored procedure %s returned %s (HTTP %d), at cost %.3f RU.\n", + sprocId, + executeResponse.responseAsString(), + executeResponse.getStatusCode(), + executeResponse.getRequestCharge())); + } + + public void deleteStoredProcedure() throws Exception { + logger.info("-Deleting stored procedure...\n"); + container.getScripts() + .getStoredProcedure(sprocId) + .delete(); + logger.info("-Deleting database...\n"); + database.delete(); + logger.info("-Closing client instance...\n"); + client.close(); + logger.info("Done."); + } +} diff --git a/src/main/java/com/azure/cosmos/examples/usermanagement/sync/UserManagementQuickstartSync.java b/src/main/java/com/azure/cosmos/examples/usermanagement/sync/UserManagementQuickstartSync.java new file mode 100644 index 0000000..a9bb4fe --- /dev/null +++ b/src/main/java/com/azure/cosmos/examples/usermanagement/sync/UserManagementQuickstartSync.java @@ -0,0 +1,4 @@ +package com.azure.cosmos.examples.usermanagement.sync; + +public class UserManagementQuickstartSync { +} diff --git a/src/main/resources/log4j2.properties b/src/main/resources/log4j2.properties new file mode 100644 index 0000000..a92d123 --- /dev/null +++ b/src/main/resources/log4j2.properties @@ -0,0 +1,16 @@ +# this is the log4j configuration for tests +# Set root logger level to WARN and its appender to STDOUT. +filter.threshold.type = ThresholdFilter +filter.threshold.level = DEBUG +rootLogger.level=INFO +rootLogger.appenderRef.stdout.ref=STDOUT +logger.netty.name=io.netty +logger.netty.level=INFO +logger.cosmos.name=com.azure.cosmos +logger.cosmos.level=INFO +# STDOUT is a ConsoleAppender and uses PatternLayout. +appender.console.name=STDOUT +appender.console.type=Console +appender.console.layout.type=PatternLayout +appender.console.layout.pattern=%m%n + diff --git a/src/main/resources/multi-master-sample-config.properties b/src/main/resources/multi-master-sample-config.properties new file mode 100644 index 0000000..42c2030 --- /dev/null +++ b/src/main/resources/multi-master-sample-config.properties @@ -0,0 +1,8 @@ +endpoint= +key= +regions=North Central US;North Europe;Southeast Asia +databaseName=multiMasterDemoDB +manualCollectionName=myManualCollection +lwwCollectionName=myLwwCollection +udpCollectionName=myUdpCollection +basicCollectionName=myBasicCollection \ No newline at end of file diff --git a/src/main/resources/resolver-storedproc.txt b/src/main/resources/resolver-storedproc.txt new file mode 100644 index 0000000..e856721 --- /dev/null +++ b/src/main/resources/resolver-storedproc.txt @@ -0,0 +1,45 @@ +function resolver(incomingRecord, existingRecord, isTombstone, conflictingRecords) { + var collection = getContext().getCollection(); + if (!incomingRecord) { + if (existingRecord) { + collection.deleteDocument(existingRecord._self, {}, function(err, responseOptions) { + if (err) throw err; + }); + } + } else if (isTombstone) { + // delete always wins. 
+ } else { + var documentToUse = incomingRecord; + if (existingRecord) { + if (documentToUse.regionId < existingRecord.regionId) { + documentToUse = existingRecord; + } + } + var i; + for (i = 0; i < conflictingRecords.length; i++) { + if (documentToUse.regionId < conflictingRecords[i].regionId) { + documentToUse = conflictingRecords[i]; + } + } + tryDelete(conflictingRecords, incomingRecord, existingRecord, documentToUse); + } + function tryDelete(documents, incoming, existing, documentToInsert) { + if (documents.length > 0) { + collection.deleteDocument(documents[0]._self, {}, function(err, responseOptions) { + if (err) throw err; + documents.shift(); + tryDelete(documents, incoming, existing, documentToInsert); + }); + } else if (existing) { + collection.replaceDocument(existing._self, documentToInsert, + function(err, documentCreated) { + if (err) throw err; + }); + } else { + collection.createDocument(collection.getSelfLink(), documentToInsert, + function(err, documentCreated) { + if (err) throw err; + }); + } + } +} \ No newline at end of file
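How the multi-master samples consume multi-master-sample-config.properties is not shown in this diff; presumably the standard java.util.Properties API is enough, since src/main/resources lands at the classpath root. A minimal sketch of loading the file and splitting the semicolon-separated region list; the class name is illustrative:

```
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

public class MultiMasterConfigSketch {
    public static void main(String[] args) throws IOException {
        Properties props = new Properties();
        try (InputStream in = MultiMasterConfigSketch.class
                .getResourceAsStream("/multi-master-sample-config.properties")) {
            props.load(in);  // endpoint and key are blank until filled in locally
        }

        String endpoint = props.getProperty("endpoint");
        List<String> regions = Arrays.asList(props.getProperty("regions").split(";"));

        System.out.println("Endpoint: " + endpoint);
        System.out.println("Preferred regions: " + regions);
        System.out.println("Manual collection: " + props.getProperty("manualCollectionName"));
    }
}
```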